1 /* 2 * QEMU AHCI Emulation 3 * 4 * Copyright (c) 2010 qiaochong@loongson.cn 5 * Copyright (c) 2010 Roland Elek <elek.roland@gmail.com> 6 * Copyright (c) 2010 Sebastian Herbszt <herbszt@gmx.de> 7 * Copyright (c) 2010 Alexander Graf <agraf@suse.de> 8 * 9 * This library is free software; you can redistribute it and/or 10 * modify it under the terms of the GNU Lesser General Public 11 * License as published by the Free Software Foundation; either 12 * version 2 of the License, or (at your option) any later version. 13 * 14 * This library is distributed in the hope that it will be useful, 15 * but WITHOUT ANY WARRANTY; without even the implied warranty of 16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 17 * Lesser General Public License for more details. 18 * 19 * You should have received a copy of the GNU Lesser General Public 20 * License along with this library; if not, see <http://www.gnu.org/licenses/>. 21 * 22 */ 23 24 #include <hw/hw.h> 25 #include <hw/pci/msi.h> 26 #include <hw/i386/pc.h> 27 #include <hw/pci/pci.h> 28 #include <hw/sysbus.h> 29 30 #include "qemu/error-report.h" 31 #include "monitor/monitor.h" 32 #include "sysemu/block-backend.h" 33 #include "sysemu/dma.h" 34 #include "internal.h" 35 #include <hw/ide/pci.h> 36 #include <hw/ide/ahci.h> 37 38 #define DEBUG_AHCI 0 39 40 #define DPRINTF(port, fmt, ...) 
\ 41 do { \ 42 if (DEBUG_AHCI) { \ 43 fprintf(stderr, "ahci: %s: [%d] ", __func__, port); \ 44 fprintf(stderr, fmt, ## __VA_ARGS__); \ 45 } \ 46 } while (0) 47 48 static void check_cmd(AHCIState *s, int port); 49 static int handle_cmd(AHCIState *s,int port,int slot); 50 static void ahci_reset_port(AHCIState *s, int port); 51 static void ahci_write_fis_d2h(AHCIDevice *ad, uint8_t *cmd_fis); 52 static void ahci_init_d2h(AHCIDevice *ad); 53 static int ahci_dma_prepare_buf(IDEDMA *dma, int is_write); 54 static void ahci_commit_buf(IDEDMA *dma, uint32_t tx_bytes); 55 static bool ahci_map_clb_address(AHCIDevice *ad); 56 static bool ahci_map_fis_address(AHCIDevice *ad); 57 static void ahci_unmap_clb_address(AHCIDevice *ad); 58 static void ahci_unmap_fis_address(AHCIDevice *ad); 59 60 61 static uint32_t ahci_port_read(AHCIState *s, int port, int offset) 62 { 63 uint32_t val; 64 AHCIPortRegs *pr; 65 pr = &s->dev[port].port_regs; 66 67 switch (offset) { 68 case PORT_LST_ADDR: 69 val = pr->lst_addr; 70 break; 71 case PORT_LST_ADDR_HI: 72 val = pr->lst_addr_hi; 73 break; 74 case PORT_FIS_ADDR: 75 val = pr->fis_addr; 76 break; 77 case PORT_FIS_ADDR_HI: 78 val = pr->fis_addr_hi; 79 break; 80 case PORT_IRQ_STAT: 81 val = pr->irq_stat; 82 break; 83 case PORT_IRQ_MASK: 84 val = pr->irq_mask; 85 break; 86 case PORT_CMD: 87 val = pr->cmd; 88 break; 89 case PORT_TFDATA: 90 val = pr->tfdata; 91 break; 92 case PORT_SIG: 93 val = pr->sig; 94 break; 95 case PORT_SCR_STAT: 96 if (s->dev[port].port.ifs[0].blk) { 97 val = SATA_SCR_SSTATUS_DET_DEV_PRESENT_PHY_UP | 98 SATA_SCR_SSTATUS_SPD_GEN1 | SATA_SCR_SSTATUS_IPM_ACTIVE; 99 } else { 100 val = SATA_SCR_SSTATUS_DET_NODEV; 101 } 102 break; 103 case PORT_SCR_CTL: 104 val = pr->scr_ctl; 105 break; 106 case PORT_SCR_ERR: 107 val = pr->scr_err; 108 break; 109 case PORT_SCR_ACT: 110 pr->scr_act &= ~s->dev[port].finished; 111 s->dev[port].finished = 0; 112 val = pr->scr_act; 113 break; 114 case PORT_CMD_ISSUE: 115 val = pr->cmd_issue; 116 break; 117 
case PORT_RESERVED: 118 default: 119 val = 0; 120 } 121 DPRINTF(port, "offset: 0x%x val: 0x%x\n", offset, val); 122 return val; 123 124 } 125 126 static void ahci_irq_raise(AHCIState *s, AHCIDevice *dev) 127 { 128 AHCIPCIState *d = container_of(s, AHCIPCIState, ahci); 129 PCIDevice *pci_dev = 130 (PCIDevice *)object_dynamic_cast(OBJECT(d), TYPE_PCI_DEVICE); 131 132 DPRINTF(0, "raise irq\n"); 133 134 if (pci_dev && msi_enabled(pci_dev)) { 135 msi_notify(pci_dev, 0); 136 } else { 137 qemu_irq_raise(s->irq); 138 } 139 } 140 141 static void ahci_irq_lower(AHCIState *s, AHCIDevice *dev) 142 { 143 AHCIPCIState *d = container_of(s, AHCIPCIState, ahci); 144 PCIDevice *pci_dev = 145 (PCIDevice *)object_dynamic_cast(OBJECT(d), TYPE_PCI_DEVICE); 146 147 DPRINTF(0, "lower irq\n"); 148 149 if (!pci_dev || !msi_enabled(pci_dev)) { 150 qemu_irq_lower(s->irq); 151 } 152 } 153 154 static void ahci_check_irq(AHCIState *s) 155 { 156 int i; 157 158 DPRINTF(-1, "check irq %#x\n", s->control_regs.irqstatus); 159 160 s->control_regs.irqstatus = 0; 161 for (i = 0; i < s->ports; i++) { 162 AHCIPortRegs *pr = &s->dev[i].port_regs; 163 if (pr->irq_stat & pr->irq_mask) { 164 s->control_regs.irqstatus |= (1 << i); 165 } 166 } 167 168 if (s->control_regs.irqstatus && 169 (s->control_regs.ghc & HOST_CTL_IRQ_EN)) { 170 ahci_irq_raise(s, NULL); 171 } else { 172 ahci_irq_lower(s, NULL); 173 } 174 } 175 176 static void ahci_trigger_irq(AHCIState *s, AHCIDevice *d, 177 int irq_type) 178 { 179 DPRINTF(d->port_no, "trigger irq %#x -> %x\n", 180 irq_type, d->port_regs.irq_mask & irq_type); 181 182 d->port_regs.irq_stat |= irq_type; 183 ahci_check_irq(s); 184 } 185 186 static void map_page(AddressSpace *as, uint8_t **ptr, uint64_t addr, 187 uint32_t wanted) 188 { 189 hwaddr len = wanted; 190 191 if (*ptr) { 192 dma_memory_unmap(as, *ptr, len, DMA_DIRECTION_FROM_DEVICE, len); 193 } 194 195 *ptr = dma_memory_map(as, addr, &len, DMA_DIRECTION_FROM_DEVICE); 196 if (len < wanted) { 197 dma_memory_unmap(as, 
/**
 * Check the cmd register to see if we should start or stop
 * the DMA or FIS RX engines.
 *
 * @ad: Device to engage.
 * @allow_stop: Allow device to transition from started to stopped?
 *   'no' is useful for migration post_load, which does not expect a transition.
 *
 * @return 0 on success, -1 on error.
 */
static int ahci_cond_start_engines(AHCIDevice *ad, bool allow_stop)
{
    AHCIPortRegs *pr = &ad->port_regs;

    /* PxCMD.ST set: map the command list and report LIST_ON;
     * cleared: unmap and drop LIST_ON (only when a stop is allowed). */
    if (pr->cmd & PORT_CMD_START) {
        if (ahci_map_clb_address(ad)) {
            pr->cmd |= PORT_CMD_LIST_ON;
        } else {
            error_report("AHCI: Failed to start DMA engine: "
                         "bad command list buffer address");
            return -1;
        }
    } else if (pr->cmd & PORT_CMD_LIST_ON) {
        if (allow_stop) {
            ahci_unmap_clb_address(ad);
            pr->cmd = pr->cmd & ~(PORT_CMD_LIST_ON);
        } else {
            error_report("AHCI: DMA engine should be off, "
                         "but appears to still be running");
            return -1;
        }
    }

    /* Same dance for PxCMD.FRE / FIS_ON and the FIS receive buffer. */
    if (pr->cmd & PORT_CMD_FIS_RX) {
        if (ahci_map_fis_address(ad)) {
            pr->cmd |= PORT_CMD_FIS_ON;
        } else {
            error_report("AHCI: Failed to start FIS receive engine: "
                         "bad FIS receive buffer address");
            return -1;
        }
    } else if (pr->cmd & PORT_CMD_FIS_ON) {
        if (allow_stop) {
            ahci_unmap_fis_address(ad);
            pr->cmd = pr->cmd & ~(PORT_CMD_FIS_ON);
        } else {
            error_report("AHCI: FIS receive engine should be off, "
                         "but appears to still be running");
            return -1;
        }
    }

    return 0;
}

/*
 * Write one 32-bit register in the per-port register bank.
 * Enforces per-register semantics: read-only, write-1-to-clear (IS, SERR),
 * and write-1-to-set (SACT, CI) fields.
 */
static void ahci_port_write(AHCIState *s, int port, int offset, uint32_t val)
{
    AHCIPortRegs *pr = &s->dev[port].port_regs;

    DPRINTF(port, "offset: 0x%x val: 0x%x\n", offset, val);
    switch (offset) {
    case PORT_LST_ADDR:
        pr->lst_addr = val;
        break;
    case PORT_LST_ADDR_HI:
        pr->lst_addr_hi = val;
        break;
    case PORT_FIS_ADDR:
        pr->fis_addr = val;
        break;
    case PORT_FIS_ADDR_HI:
        pr->fis_addr_hi = val;
        break;
    case PORT_IRQ_STAT:
        /* write-1-to-clear */
        pr->irq_stat &= ~val;
        ahci_check_irq(s);
        break;
    case PORT_IRQ_MASK:
        /* mask off bits that are reserved in PxIE */
        pr->irq_mask = val & 0xfdc000ff;
        ahci_check_irq(s);
        break;
    case PORT_CMD:
        /* Block any Read-only fields from being set;
         * including LIST_ON and FIS_ON. */
        pr->cmd = (pr->cmd & PORT_CMD_RO_MASK) | (val & ~PORT_CMD_RO_MASK);

        /* Check FIS RX and CLB engines, allow transition to false: */
        ahci_cond_start_engines(&s->dev[port], true);

        /* XXX usually the FIS would be pending on the bus here and
           issuing deferred until the OS enables FIS receival.
           Instead, we only submit it once - which works in most
           cases, but is a hack. */
        if ((pr->cmd & PORT_CMD_FIS_ON) &&
            !s->dev[port].init_d2h_sent) {
            ahci_init_d2h(&s->dev[port]);
            s->dev[port].init_d2h_sent = true;
        }

        check_cmd(s, port);
        break;
    case PORT_TFDATA:
        /* Read Only. */
        break;
    case PORT_SIG:
        /* Read Only */
        break;
    case PORT_SCR_STAT:
        /* Read Only */
        break;
    case PORT_SCR_CTL:
        /* Leaving the DET=1 (COMRESET) state resets the port. */
        if (((pr->scr_ctl & AHCI_SCR_SCTL_DET) == 1) &&
            ((val & AHCI_SCR_SCTL_DET) == 0)) {
            ahci_reset_port(s, port);
        }
        pr->scr_ctl = val;
        break;
    case PORT_SCR_ERR:
        /* write-1-to-clear */
        pr->scr_err &= ~val;
        break;
    case PORT_SCR_ACT:
        /* RW1 */
        pr->scr_act |= val;
        break;
    case PORT_CMD_ISSUE:
        /* write-1-to-set; newly set slots are processed immediately */
        pr->cmd_issue |= val;
        check_cmd(s, port);
        break;
    default:
        break;
    }
}
/*
 * MMIO read dispatcher for the AHCI BAR: global HBA registers first,
 * then the per-port register banks (128 bytes / port).
 */
static uint64_t ahci_mem_read(void *opaque, hwaddr addr,
                              unsigned size)
{
    AHCIState *s = opaque;
    uint32_t val = 0;

    if (addr < AHCI_GENERIC_HOST_CONTROL_REGS_MAX_ADDR) {
        switch (addr) {
        case HOST_CAP:
            val = s->control_regs.cap;
            break;
        case HOST_CTL:
            val = s->control_regs.ghc;
            break;
        case HOST_IRQ_STAT:
            val = s->control_regs.irqstatus;
            break;
        case HOST_PORTS_IMPL:
            val = s->control_regs.impl;
            break;
        case HOST_VERSION:
            val = s->control_regs.version;
            break;
        }

        DPRINTF(-1, "(addr 0x%08X), val 0x%08X\n", (unsigned) addr, val);
    } else if ((addr >= AHCI_PORT_REGS_START_ADDR) &&
               (addr < (AHCI_PORT_REGS_START_ADDR +
                (s->ports * AHCI_PORT_ADDR_OFFSET_LEN)))) {
        /* >> 7 == divide by the 128-byte per-port register bank size */
        val = ahci_port_read(s, (addr - AHCI_PORT_REGS_START_ADDR) >> 7,
                             addr & AHCI_PORT_ADDR_OFFSET_MASK);
    }

    return val;
}



/*
 * MMIO write dispatcher for the AHCI BAR; mirrors ahci_mem_read.
 */
static void ahci_mem_write(void *opaque, hwaddr addr,
                           uint64_t val, unsigned size)
{
    AHCIState *s = opaque;

    /* Only aligned accesses are allowed on AHCI; drop mis-aligned writes.
     * (The comment previously said "reads", but this guards the write path.) */
    if (addr & 3) {
        fprintf(stderr, "ahci: Mis-aligned write to addr 0x"
                TARGET_FMT_plx "\n", addr);
        return;
    }

    if (addr < AHCI_GENERIC_HOST_CONTROL_REGS_MAX_ADDR) {
        DPRINTF(-1, "(addr 0x%08X), val 0x%08"PRIX64"\n", (unsigned) addr, val);

        switch (addr) {
        case HOST_CAP: /* R/WO, RO */
            /* FIXME handle R/WO */
            break;
        case HOST_CTL: /* R/W */
            if (val & HOST_CTL_RESET) {
                DPRINTF(-1, "HBA Reset\n");
                ahci_reset(s);
            } else {
                /* only IE and HR are writable; AE stays set */
                s->control_regs.ghc = (val & 0x3) | HOST_CTL_AHCI_EN;
                ahci_check_irq(s);
            }
            break;
        case HOST_IRQ_STAT: /* R/WC, RO */
            s->control_regs.irqstatus &= ~val;
            ahci_check_irq(s);
            break;
        case HOST_PORTS_IMPL: /* R/WO, RO */
            /* FIXME handle R/WO */
            break;
        case HOST_VERSION: /* RO */
            /* FIXME report write? */
            break;
        default:
            DPRINTF(-1, "write to unknown register 0x%x\n", (unsigned)addr);
        }
    } else if ((addr >= AHCI_PORT_REGS_START_ADDR) &&
               (addr < (AHCI_PORT_REGS_START_ADDR +
                (s->ports * AHCI_PORT_ADDR_OFFSET_LEN)))) {
        ahci_port_write(s, (addr - AHCI_PORT_REGS_START_ADDR) >> 7,
                        addr & AHCI_PORT_ADDR_OFFSET_MASK, val);
    }
}

static const MemoryRegionOps ahci_mem_ops = {
    .read = ahci_mem_read,
    .write = ahci_mem_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

/*
 * Legacy index/data pair: a read of idp_offset returns the index register;
 * a read of idp_offset+4 performs an MMIO read at the indexed location.
 */
static uint64_t ahci_idp_read(void *opaque, hwaddr addr,
                              unsigned size)
{
    AHCIState *s = opaque;

    if (addr == s->idp_offset) {
        /* index register */
        return s->idp_index;
    } else if (addr == s->idp_offset + 4) {
        /* data register - do memory read at location selected by index */
        return ahci_mem_read(opaque, s->idp_index, size);
    } else {
        return 0;
    }
}

/* Write side of the index/data pair; see ahci_idp_read. */
static void ahci_idp_write(void *opaque, hwaddr addr,
                           uint64_t val, unsigned size)
{
    AHCIState *s = opaque;

    if (addr == s->idp_offset) {
        /* index register - mask off reserved bits */
        s->idp_index = (uint32_t)val & ((AHCI_MEM_BAR_SIZE - 1) & ~3);
    } else if (addr == s->idp_offset + 4) {
        /* data register - do memory write at location selected by index */
        ahci_mem_write(opaque, s->idp_index, val, size);
    }
}

static const MemoryRegionOps ahci_idp_ops = {
    .read = ahci_idp_read,
    .write = ahci_idp_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
ahci_idp_write, 462 .endianness = DEVICE_LITTLE_ENDIAN, 463 }; 464 465 466 static void ahci_reg_init(AHCIState *s) 467 { 468 int i; 469 470 s->control_regs.cap = (s->ports - 1) | 471 (AHCI_NUM_COMMAND_SLOTS << 8) | 472 (AHCI_SUPPORTED_SPEED_GEN1 << AHCI_SUPPORTED_SPEED) | 473 HOST_CAP_NCQ | HOST_CAP_AHCI; 474 475 s->control_regs.impl = (1 << s->ports) - 1; 476 477 s->control_regs.version = AHCI_VERSION_1_0; 478 479 for (i = 0; i < s->ports; i++) { 480 s->dev[i].port_state = STATE_RUN; 481 } 482 } 483 484 static void check_cmd(AHCIState *s, int port) 485 { 486 AHCIPortRegs *pr = &s->dev[port].port_regs; 487 int slot; 488 489 if ((pr->cmd & PORT_CMD_START) && pr->cmd_issue) { 490 for (slot = 0; (slot < 32) && pr->cmd_issue; slot++) { 491 if ((pr->cmd_issue & (1U << slot)) && 492 !handle_cmd(s, port, slot)) { 493 pr->cmd_issue &= ~(1U << slot); 494 } 495 } 496 } 497 } 498 499 static void ahci_check_cmd_bh(void *opaque) 500 { 501 AHCIDevice *ad = opaque; 502 503 qemu_bh_delete(ad->check_bh); 504 ad->check_bh = NULL; 505 506 if ((ad->busy_slot != -1) && 507 !(ad->port.ifs[0].status & (BUSY_STAT|DRQ_STAT))) { 508 /* no longer busy */ 509 ad->port_regs.cmd_issue &= ~(1 << ad->busy_slot); 510 ad->busy_slot = -1; 511 } 512 513 check_cmd(ad->hba, ad->port_no); 514 } 515 516 static void ahci_init_d2h(AHCIDevice *ad) 517 { 518 uint8_t init_fis[20]; 519 IDEState *ide_state = &ad->port.ifs[0]; 520 521 memset(init_fis, 0, sizeof(init_fis)); 522 523 init_fis[4] = 1; 524 init_fis[12] = 1; 525 526 if (ide_state->drive_kind == IDE_CD) { 527 init_fis[5] = ide_state->lcyl; 528 init_fis[6] = ide_state->hcyl; 529 } 530 531 ahci_write_fis_d2h(ad, init_fis); 532 } 533 534 static void ahci_reset_port(AHCIState *s, int port) 535 { 536 AHCIDevice *d = &s->dev[port]; 537 AHCIPortRegs *pr = &d->port_regs; 538 IDEState *ide_state = &d->port.ifs[0]; 539 int i; 540 541 DPRINTF(port, "reset port\n"); 542 543 ide_bus_reset(&d->port); 544 ide_state->ncq_queues = AHCI_MAX_CMDS; 545 546 pr->scr_stat = 
0; 547 pr->scr_err = 0; 548 pr->scr_act = 0; 549 pr->tfdata = 0x7F; 550 pr->sig = 0xFFFFFFFF; 551 d->busy_slot = -1; 552 d->init_d2h_sent = false; 553 554 ide_state = &s->dev[port].port.ifs[0]; 555 if (!ide_state->blk) { 556 return; 557 } 558 559 /* reset ncq queue */ 560 for (i = 0; i < AHCI_MAX_CMDS; i++) { 561 NCQTransferState *ncq_tfs = &s->dev[port].ncq_tfs[i]; 562 if (!ncq_tfs->used) { 563 continue; 564 } 565 566 if (ncq_tfs->aiocb) { 567 blk_aio_cancel(ncq_tfs->aiocb); 568 ncq_tfs->aiocb = NULL; 569 } 570 571 /* Maybe we just finished the request thanks to blk_aio_cancel() */ 572 if (!ncq_tfs->used) { 573 continue; 574 } 575 576 qemu_sglist_destroy(&ncq_tfs->sglist); 577 ncq_tfs->used = 0; 578 } 579 580 s->dev[port].port_state = STATE_RUN; 581 if (!ide_state->blk) { 582 pr->sig = 0; 583 ide_state->status = SEEK_STAT | WRERR_STAT; 584 } else if (ide_state->drive_kind == IDE_CD) { 585 pr->sig = SATA_SIGNATURE_CDROM; 586 ide_state->lcyl = 0x14; 587 ide_state->hcyl = 0xeb; 588 DPRINTF(port, "set lcyl = %d\n", ide_state->lcyl); 589 ide_state->status = SEEK_STAT | WRERR_STAT | READY_STAT; 590 } else { 591 pr->sig = SATA_SIGNATURE_DISK; 592 ide_state->status = SEEK_STAT | WRERR_STAT; 593 } 594 595 ide_state->error = 1; 596 ahci_init_d2h(d); 597 } 598 599 static void debug_print_fis(uint8_t *fis, int cmd_len) 600 { 601 #if DEBUG_AHCI 602 int i; 603 604 fprintf(stderr, "fis:"); 605 for (i = 0; i < cmd_len; i++) { 606 if ((i & 0xf) == 0) { 607 fprintf(stderr, "\n%02x:",i); 608 } 609 fprintf(stderr, "%02x ",fis[i]); 610 } 611 fprintf(stderr, "\n"); 612 #endif 613 } 614 615 static bool ahci_map_fis_address(AHCIDevice *ad) 616 { 617 AHCIPortRegs *pr = &ad->port_regs; 618 map_page(ad->hba->as, &ad->res_fis, 619 ((uint64_t)pr->fis_addr_hi << 32) | pr->fis_addr, 256); 620 return ad->res_fis != NULL; 621 } 622 623 static void ahci_unmap_fis_address(AHCIDevice *ad) 624 { 625 dma_memory_unmap(ad->hba->as, ad->res_fis, 256, 626 DMA_DIRECTION_FROM_DEVICE, 256); 627 ad->res_fis = 
/* Map the 1 KiB command list (32 command headers); returns false on a bad
 * address.  Invalidates the cached current command header. */
static bool ahci_map_clb_address(AHCIDevice *ad)
{
    AHCIPortRegs *pr = &ad->port_regs;
    ad->cur_cmd = NULL;
    map_page(ad->hba->as, &ad->lst,
             ((uint64_t)pr->lst_addr_hi << 32) | pr->lst_addr, 1024);
    return ad->lst != NULL;
}

static void ahci_unmap_clb_address(AHCIDevice *ad)
{
    dma_memory_unmap(ad->hba->as, ad->lst, 1024,
                     DMA_DIRECTION_FROM_DEVICE, 1024);
    ad->lst = NULL;
}

/*
 * Post a Set Device Bits FIS into the FIS receive area to report NCQ
 * completions.  @finished is the bitmask of tags that just completed;
 * it is accumulated into dev->finished, which ahci_port_read() uses to
 * clear PxSACT bits on the guest's next read.
 */
static void ahci_write_fis_sdb(AHCIState *s, int port, uint32_t finished)
{
    AHCIDevice *ad = &s->dev[port];
    AHCIPortRegs *pr = &ad->port_regs;
    IDEState *ide_state;
    SDBFIS *sdb_fis;

    /* FIS receive must be mapped and enabled */
    if (!s->dev[port].res_fis ||
        !(pr->cmd & PORT_CMD_FIS_RX)) {
        return;
    }

    sdb_fis = (SDBFIS *)&ad->res_fis[RES_FIS_SDBFIS];
    ide_state = &ad->port.ifs[0];

    sdb_fis->type = SATA_FIS_TYPE_SDB;
    /* Interrupt pending & Notification bit */
    sdb_fis->flags = (ad->hba->control_regs.irqstatus ? (1 << 6) : 0);
    sdb_fis->status = ide_state->status & 0x77;
    sdb_fis->error = ide_state->error;
    /* update SAct field in SDB_FIS */
    s->dev[port].finished |= finished;
    sdb_fis->payload = cpu_to_le32(ad->finished);

    /* Update shadow registers (except BSY 0x80 and DRQ 0x08) */
    pr->tfdata = (ad->port.ifs[0].error << 8) |
        (ad->port.ifs[0].status & 0x77) |
        (pr->tfdata & 0x88);

    ahci_trigger_irq(s, ad, PORT_IRQ_SDB_FIS);
}

/*
 * Post a PIO Setup FIS describing a PIO data transfer of @len bytes.
 * Reads bytes 12/13 (sector count) back out of the guest's command FIS,
 * so the command table is re-mapped here.
 */
static void ahci_write_fis_pio(AHCIDevice *ad, uint16_t len)
{
    AHCIPortRegs *pr = &ad->port_regs;
    uint8_t *pio_fis, *cmd_fis;
    uint64_t tbl_addr;
    dma_addr_t cmd_len = 0x80;
    IDEState *s = &ad->port.ifs[0];

    if (!ad->res_fis || !(pr->cmd & PORT_CMD_FIS_RX)) {
        return;
    }

    /* map cmd_fis */
    tbl_addr = le64_to_cpu(ad->cur_cmd->tbl_addr);
    cmd_fis = dma_memory_map(ad->hba->as, tbl_addr, &cmd_len,
                             DMA_DIRECTION_TO_DEVICE);

    if (cmd_fis == NULL) {
        DPRINTF(ad->port_no, "dma_memory_map failed in ahci_write_fis_pio");
        ahci_trigger_irq(ad->hba, ad, PORT_IRQ_HBUS_ERR);
        return;
    }

    if (cmd_len != 0x80) {
        DPRINTF(ad->port_no,
                "dma_memory_map mapped too few bytes in ahci_write_fis_pio");
        dma_memory_unmap(ad->hba->as, cmd_fis, cmd_len,
                         DMA_DIRECTION_TO_DEVICE, cmd_len);
        ahci_trigger_irq(ad->hba, ad, PORT_IRQ_HBUS_ERR);
        return;
    }

    pio_fis = &ad->res_fis[RES_FIS_PSFIS];

    /* Byte layout per the SATA PIO Setup FIS definition. */
    pio_fis[0] = SATA_FIS_TYPE_PIO_SETUP;
    pio_fis[1] = (ad->hba->control_regs.irqstatus ? (1 << 6) : 0);
    pio_fis[2] = s->status;
    pio_fis[3] = s->error;

    pio_fis[4] = s->sector;
    pio_fis[5] = s->lcyl;
    pio_fis[6] = s->hcyl;
    pio_fis[7] = s->select;
    pio_fis[8] = s->hob_sector;
    pio_fis[9] = s->hob_lcyl;
    pio_fis[10] = s->hob_hcyl;
    pio_fis[11] = 0;
    pio_fis[12] = cmd_fis[12];
    pio_fis[13] = cmd_fis[13];
    pio_fis[14] = 0;
    pio_fis[15] = s->status;
    pio_fis[16] = len & 255;
    pio_fis[17] = len >> 8;
    pio_fis[18] = 0;
    pio_fis[19] = 0;

    /* Update shadow registers: */
    pr->tfdata = (ad->port.ifs[0].error << 8) |
        ad->port.ifs[0].status;

    if (pio_fis[2] & ERR_STAT) {
        ahci_trigger_irq(ad->hba, ad, PORT_IRQ_TF_ERR);
    }

    ahci_trigger_irq(ad->hba, ad, PORT_IRQ_PIOS_FIS);

    dma_memory_unmap(ad->hba->as, cmd_fis, cmd_len,
                     DMA_DIRECTION_TO_DEVICE, cmd_len);
}

/*
 * Post a D2H Register FIS reflecting the current taskfile state.
 * @cmd_fis may be NULL, in which case the guest's command FIS is mapped
 * here (bytes 12/13 are echoed back from it).
 */
static void ahci_write_fis_d2h(AHCIDevice *ad, uint8_t *cmd_fis)
{
    AHCIPortRegs *pr = &ad->port_regs;
    uint8_t *d2h_fis;
    int i;
    dma_addr_t cmd_len = 0x80;
    int cmd_mapped = 0;
    IDEState *s = &ad->port.ifs[0];

    if (!ad->res_fis || !(pr->cmd & PORT_CMD_FIS_RX)) {
        return;
    }

    if (!cmd_fis) {
        /* map cmd_fis */
        uint64_t tbl_addr = le64_to_cpu(ad->cur_cmd->tbl_addr);
        cmd_fis = dma_memory_map(ad->hba->as, tbl_addr, &cmd_len,
                                 DMA_DIRECTION_TO_DEVICE);
        cmd_mapped = 1;
    }

    d2h_fis = &ad->res_fis[RES_FIS_RFIS];

    d2h_fis[0] = SATA_FIS_TYPE_REGISTER_D2H;
    d2h_fis[1] = (ad->hba->control_regs.irqstatus ? (1 << 6) : 0);
    d2h_fis[2] = s->status;
    d2h_fis[3] = s->error;

    d2h_fis[4] = s->sector;
    d2h_fis[5] = s->lcyl;
    d2h_fis[6] = s->hcyl;
    d2h_fis[7] = s->select;
    d2h_fis[8] = s->hob_sector;
    d2h_fis[9] = s->hob_lcyl;
    d2h_fis[10] = s->hob_hcyl;
    d2h_fis[11] = 0;
    d2h_fis[12] = cmd_fis[12];
    d2h_fis[13] = cmd_fis[13];
    for (i = 14; i < 20; i++) {
        d2h_fis[i] = 0;
    }

    /* Update shadow registers: */
    pr->tfdata = (ad->port.ifs[0].error << 8) |
        ad->port.ifs[0].status;

    if (d2h_fis[2] & ERR_STAT) {
        ahci_trigger_irq(ad->hba, ad, PORT_IRQ_TF_ERR);
    }

    ahci_trigger_irq(ad->hba, ad, PORT_IRQ_D2H_REG_FIS);

    if (cmd_mapped) {
        dma_memory_unmap(ad->hba->as, cmd_fis, cmd_len,
                         DMA_DIRECTION_TO_DEVICE, cmd_len);
    }
}
&ad->res_fis[RES_FIS_RFIS]; 770 771 d2h_fis[0] = SATA_FIS_TYPE_REGISTER_D2H; 772 d2h_fis[1] = (ad->hba->control_regs.irqstatus ? (1 << 6) : 0); 773 d2h_fis[2] = s->status; 774 d2h_fis[3] = s->error; 775 776 d2h_fis[4] = s->sector; 777 d2h_fis[5] = s->lcyl; 778 d2h_fis[6] = s->hcyl; 779 d2h_fis[7] = s->select; 780 d2h_fis[8] = s->hob_sector; 781 d2h_fis[9] = s->hob_lcyl; 782 d2h_fis[10] = s->hob_hcyl; 783 d2h_fis[11] = 0; 784 d2h_fis[12] = cmd_fis[12]; 785 d2h_fis[13] = cmd_fis[13]; 786 for (i = 14; i < 20; i++) { 787 d2h_fis[i] = 0; 788 } 789 790 /* Update shadow registers: */ 791 pr->tfdata = (ad->port.ifs[0].error << 8) | 792 ad->port.ifs[0].status; 793 794 if (d2h_fis[2] & ERR_STAT) { 795 ahci_trigger_irq(ad->hba, ad, PORT_IRQ_TF_ERR); 796 } 797 798 ahci_trigger_irq(ad->hba, ad, PORT_IRQ_D2H_REG_FIS); 799 800 if (cmd_mapped) { 801 dma_memory_unmap(ad->hba->as, cmd_fis, cmd_len, 802 DMA_DIRECTION_TO_DEVICE, cmd_len); 803 } 804 } 805 806 static int prdt_tbl_entry_size(const AHCI_SG *tbl) 807 { 808 return (le32_to_cpu(tbl->flags_size) & AHCI_PRDT_SIZE_MASK) + 1; 809 } 810 811 static int ahci_populate_sglist(AHCIDevice *ad, QEMUSGList *sglist, 812 int32_t offset) 813 { 814 AHCICmdHdr *cmd = ad->cur_cmd; 815 uint32_t opts = le32_to_cpu(cmd->opts); 816 uint64_t prdt_addr = le64_to_cpu(cmd->tbl_addr) + 0x80; 817 int sglist_alloc_hint = opts >> AHCI_CMD_HDR_PRDT_LEN; 818 dma_addr_t prdt_len = (sglist_alloc_hint * sizeof(AHCI_SG)); 819 dma_addr_t real_prdt_len = prdt_len; 820 uint8_t *prdt; 821 int i; 822 int r = 0; 823 uint64_t sum = 0; 824 int off_idx = -1; 825 int64_t off_pos = -1; 826 int tbl_entry_size; 827 IDEBus *bus = &ad->port; 828 BusState *qbus = BUS(bus); 829 830 /* 831 * Note: AHCI PRDT can describe up to 256GiB. SATA/ATA only support 832 * transactions of up to 32MiB as of ATA8-ACS3 rev 1b, assuming a 833 * 512 byte sector size. 
We limit the PRDT in this implementation to 834 * a reasonably large 2GiB, which can accommodate the maximum transfer 835 * request for sector sizes up to 32K. 836 */ 837 838 if (!sglist_alloc_hint) { 839 DPRINTF(ad->port_no, "no sg list given by guest: 0x%08x\n", opts); 840 return -1; 841 } 842 843 /* map PRDT */ 844 if (!(prdt = dma_memory_map(ad->hba->as, prdt_addr, &prdt_len, 845 DMA_DIRECTION_TO_DEVICE))){ 846 DPRINTF(ad->port_no, "map failed\n"); 847 return -1; 848 } 849 850 if (prdt_len < real_prdt_len) { 851 DPRINTF(ad->port_no, "mapped less than expected\n"); 852 r = -1; 853 goto out; 854 } 855 856 /* Get entries in the PRDT, init a qemu sglist accordingly */ 857 if (sglist_alloc_hint > 0) { 858 AHCI_SG *tbl = (AHCI_SG *)prdt; 859 sum = 0; 860 for (i = 0; i < sglist_alloc_hint; i++) { 861 /* flags_size is zero-based */ 862 tbl_entry_size = prdt_tbl_entry_size(&tbl[i]); 863 if (offset <= (sum + tbl_entry_size)) { 864 off_idx = i; 865 off_pos = offset - sum; 866 break; 867 } 868 sum += tbl_entry_size; 869 } 870 if ((off_idx == -1) || (off_pos < 0) || (off_pos > tbl_entry_size)) { 871 DPRINTF(ad->port_no, "%s: Incorrect offset! 
" 872 "off_idx: %d, off_pos: %"PRId64"\n", 873 __func__, off_idx, off_pos); 874 r = -1; 875 goto out; 876 } 877 878 qemu_sglist_init(sglist, qbus->parent, (sglist_alloc_hint - off_idx), 879 ad->hba->as); 880 qemu_sglist_add(sglist, le64_to_cpu(tbl[off_idx].addr) + off_pos, 881 prdt_tbl_entry_size(&tbl[off_idx]) - off_pos); 882 883 for (i = off_idx + 1; i < sglist_alloc_hint; i++) { 884 /* flags_size is zero-based */ 885 qemu_sglist_add(sglist, le64_to_cpu(tbl[i].addr), 886 prdt_tbl_entry_size(&tbl[i])); 887 if (sglist->size > INT32_MAX) { 888 error_report("AHCI Physical Region Descriptor Table describes " 889 "more than 2 GiB.\n"); 890 qemu_sglist_destroy(sglist); 891 r = -1; 892 goto out; 893 } 894 } 895 } 896 897 out: 898 dma_memory_unmap(ad->hba->as, prdt, prdt_len, 899 DMA_DIRECTION_TO_DEVICE, prdt_len); 900 return r; 901 } 902 903 static void ncq_cb(void *opaque, int ret) 904 { 905 NCQTransferState *ncq_tfs = (NCQTransferState *)opaque; 906 IDEState *ide_state = &ncq_tfs->drive->port.ifs[0]; 907 908 if (ret == -ECANCELED) { 909 return; 910 } 911 /* Clear bit for this tag in SActive */ 912 ncq_tfs->drive->port_regs.scr_act &= ~(1 << ncq_tfs->tag); 913 914 if (ret < 0) { 915 /* error */ 916 ide_state->error = ABRT_ERR; 917 ide_state->status = READY_STAT | ERR_STAT; 918 ncq_tfs->drive->port_regs.scr_err |= (1 << ncq_tfs->tag); 919 } else { 920 ide_state->status = READY_STAT | SEEK_STAT; 921 } 922 923 ahci_write_fis_sdb(ncq_tfs->drive->hba, ncq_tfs->drive->port_no, 924 (1 << ncq_tfs->tag)); 925 926 DPRINTF(ncq_tfs->drive->port_no, "NCQ transfer tag %d finished\n", 927 ncq_tfs->tag); 928 929 block_acct_done(blk_get_stats(ncq_tfs->drive->port.ifs[0].blk), 930 &ncq_tfs->acct); 931 qemu_sglist_destroy(&ncq_tfs->sglist); 932 ncq_tfs->used = 0; 933 } 934 935 static int is_ncq(uint8_t ata_cmd) 936 { 937 /* Based on SATA 3.2 section 13.6.3.2 */ 938 switch (ata_cmd) { 939 case READ_FPDMA_QUEUED: 940 case WRITE_FPDMA_QUEUED: 941 case NCQ_NON_DATA: 942 case 
RECEIVE_FPDMA_QUEUED: 943 case SEND_FPDMA_QUEUED: 944 return 1; 945 default: 946 return 0; 947 } 948 } 949 950 static void process_ncq_command(AHCIState *s, int port, uint8_t *cmd_fis, 951 int slot) 952 { 953 NCQFrame *ncq_fis = (NCQFrame*)cmd_fis; 954 uint8_t tag = ncq_fis->tag >> 3; 955 NCQTransferState *ncq_tfs = &s->dev[port].ncq_tfs[tag]; 956 957 if (ncq_tfs->used) { 958 /* error - already in use */ 959 fprintf(stderr, "%s: tag %d already used\n", __FUNCTION__, tag); 960 return; 961 } 962 963 ncq_tfs->used = 1; 964 ncq_tfs->drive = &s->dev[port]; 965 ncq_tfs->slot = slot; 966 ncq_tfs->lba = ((uint64_t)ncq_fis->lba5 << 40) | 967 ((uint64_t)ncq_fis->lba4 << 32) | 968 ((uint64_t)ncq_fis->lba3 << 24) | 969 ((uint64_t)ncq_fis->lba2 << 16) | 970 ((uint64_t)ncq_fis->lba1 << 8) | 971 (uint64_t)ncq_fis->lba0; 972 973 /* Note: We calculate the sector count, but don't currently rely on it. 974 * The total size of the DMA buffer tells us the transfer size instead. */ 975 ncq_tfs->sector_count = ((uint16_t)ncq_fis->sector_count_high << 8) | 976 ncq_fis->sector_count_low; 977 978 DPRINTF(port, "NCQ transfer LBA from %"PRId64" to %"PRId64", " 979 "drive max %"PRId64"\n", 980 ncq_tfs->lba, ncq_tfs->lba + ncq_tfs->sector_count - 2, 981 s->dev[port].port.ifs[0].nb_sectors - 1); 982 983 ahci_populate_sglist(&s->dev[port], &ncq_tfs->sglist, 0); 984 ncq_tfs->tag = tag; 985 986 switch(ncq_fis->command) { 987 case READ_FPDMA_QUEUED: 988 DPRINTF(port, "NCQ reading %d sectors from LBA %"PRId64", " 989 "tag %d\n", 990 ncq_tfs->sector_count-1, ncq_tfs->lba, ncq_tfs->tag); 991 992 DPRINTF(port, "tag %d aio read %"PRId64"\n", 993 ncq_tfs->tag, ncq_tfs->lba); 994 995 dma_acct_start(ncq_tfs->drive->port.ifs[0].blk, &ncq_tfs->acct, 996 &ncq_tfs->sglist, BLOCK_ACCT_READ); 997 ncq_tfs->aiocb = dma_blk_read(ncq_tfs->drive->port.ifs[0].blk, 998 &ncq_tfs->sglist, ncq_tfs->lba, 999 ncq_cb, ncq_tfs); 1000 break; 1001 case WRITE_FPDMA_QUEUED: 1002 DPRINTF(port, "NCQ writing %d sectors to LBA 
%"PRId64", tag %d\n", 1003 ncq_tfs->sector_count-1, ncq_tfs->lba, ncq_tfs->tag); 1004 1005 DPRINTF(port, "tag %d aio write %"PRId64"\n", 1006 ncq_tfs->tag, ncq_tfs->lba); 1007 1008 dma_acct_start(ncq_tfs->drive->port.ifs[0].blk, &ncq_tfs->acct, 1009 &ncq_tfs->sglist, BLOCK_ACCT_WRITE); 1010 ncq_tfs->aiocb = dma_blk_write(ncq_tfs->drive->port.ifs[0].blk, 1011 &ncq_tfs->sglist, ncq_tfs->lba, 1012 ncq_cb, ncq_tfs); 1013 break; 1014 default: 1015 if (is_ncq(cmd_fis[2])) { 1016 DPRINTF(port, 1017 "error: unsupported NCQ command (0x%02x) received\n", 1018 cmd_fis[2]); 1019 } else { 1020 DPRINTF(port, 1021 "error: tried to process non-NCQ command as NCQ\n"); 1022 } 1023 qemu_sglist_destroy(&ncq_tfs->sglist); 1024 } 1025 } 1026 1027 static void handle_reg_h2d_fis(AHCIState *s, int port, 1028 int slot, uint8_t *cmd_fis) 1029 { 1030 IDEState *ide_state = &s->dev[port].port.ifs[0]; 1031 AHCICmdHdr *cmd = s->dev[port].cur_cmd; 1032 uint32_t opts = le32_to_cpu(cmd->opts); 1033 1034 if (cmd_fis[1] & 0x0F) { 1035 DPRINTF(port, "Port Multiplier not supported." 1036 " cmd_fis[0]=%02x cmd_fis[1]=%02x cmd_fis[2]=%02x\n", 1037 cmd_fis[0], cmd_fis[1], cmd_fis[2]); 1038 return; 1039 } 1040 1041 if (cmd_fis[1] & 0x70) { 1042 DPRINTF(port, "Reserved flags set in H2D Register FIS." 
1043 " cmd_fis[0]=%02x cmd_fis[1]=%02x cmd_fis[2]=%02x\n", 1044 cmd_fis[0], cmd_fis[1], cmd_fis[2]); 1045 return; 1046 } 1047 1048 if (!(cmd_fis[1] & SATA_FIS_REG_H2D_UPDATE_COMMAND_REGISTER)) { 1049 switch (s->dev[port].port_state) { 1050 case STATE_RUN: 1051 if (cmd_fis[15] & ATA_SRST) { 1052 s->dev[port].port_state = STATE_RESET; 1053 } 1054 break; 1055 case STATE_RESET: 1056 if (!(cmd_fis[15] & ATA_SRST)) { 1057 ahci_reset_port(s, port); 1058 } 1059 break; 1060 } 1061 return; 1062 } 1063 1064 /* Check for NCQ command */ 1065 if (is_ncq(cmd_fis[2])) { 1066 process_ncq_command(s, port, cmd_fis, slot); 1067 return; 1068 } 1069 1070 /* Decompose the FIS: 1071 * AHCI does not interpret FIS packets, it only forwards them. 1072 * SATA 1.0 describes how to decode LBA28 and CHS FIS packets. 1073 * Later specifications, e.g, SATA 3.2, describe LBA48 FIS packets. 1074 * 1075 * ATA4 describes sector number for LBA28/CHS commands. 1076 * ATA6 describes sector number for LBA48 commands. 1077 * ATA8 deprecates CHS fully, describing only LBA28/48. 1078 * 1079 * We dutifully convert the FIS into IDE registers, and allow the 1080 * core layer to interpret them as needed. */ 1081 ide_state->feature = cmd_fis[3]; 1082 ide_state->sector = cmd_fis[4]; /* LBA 7:0 */ 1083 ide_state->lcyl = cmd_fis[5]; /* LBA 15:8 */ 1084 ide_state->hcyl = cmd_fis[6]; /* LBA 23:16 */ 1085 ide_state->select = cmd_fis[7]; /* LBA 27:24 (LBA28) */ 1086 ide_state->hob_sector = cmd_fis[8]; /* LBA 31:24 */ 1087 ide_state->hob_lcyl = cmd_fis[9]; /* LBA 39:32 */ 1088 ide_state->hob_hcyl = cmd_fis[10]; /* LBA 47:40 */ 1089 ide_state->hob_feature = cmd_fis[11]; 1090 ide_state->nsector = (int64_t)((cmd_fis[13] << 8) | cmd_fis[12]); 1091 /* 14, 16, 17, 18, 19: Reserved (SATA 1.0) */ 1092 /* 15: Only valid when UPDATE_COMMAND not set. 
*/ 1093 1094 /* Copy the ACMD field (ATAPI packet, if any) from the AHCI command 1095 * table to ide_state->io_buffer */ 1096 if (opts & AHCI_CMD_ATAPI) { 1097 memcpy(ide_state->io_buffer, &cmd_fis[AHCI_COMMAND_TABLE_ACMD], 0x10); 1098 debug_print_fis(ide_state->io_buffer, 0x10); 1099 s->dev[port].done_atapi_packet = false; 1100 /* XXX send PIO setup FIS */ 1101 } 1102 1103 ide_state->error = 0; 1104 1105 /* Reset transferred byte counter */ 1106 cmd->status = 0; 1107 1108 /* We're ready to process the command in FIS byte 2. */ 1109 ide_exec_cmd(&s->dev[port].port, cmd_fis[2]); 1110 } 1111 1112 static int handle_cmd(AHCIState *s, int port, int slot) 1113 { 1114 IDEState *ide_state; 1115 uint64_t tbl_addr; 1116 AHCICmdHdr *cmd; 1117 uint8_t *cmd_fis; 1118 dma_addr_t cmd_len; 1119 1120 if (s->dev[port].port.ifs[0].status & (BUSY_STAT|DRQ_STAT)) { 1121 /* Engine currently busy, try again later */ 1122 DPRINTF(port, "engine busy\n"); 1123 return -1; 1124 } 1125 1126 if (!s->dev[port].lst) { 1127 DPRINTF(port, "error: lst not given but cmd handled"); 1128 return -1; 1129 } 1130 cmd = &((AHCICmdHdr *)s->dev[port].lst)[slot]; 1131 /* remember current slot handle for later */ 1132 s->dev[port].cur_cmd = cmd; 1133 1134 /* The device we are working for */ 1135 ide_state = &s->dev[port].port.ifs[0]; 1136 if (!ide_state->blk) { 1137 DPRINTF(port, "error: guest accessed unused port"); 1138 return -1; 1139 } 1140 1141 tbl_addr = le64_to_cpu(cmd->tbl_addr); 1142 cmd_len = 0x80; 1143 cmd_fis = dma_memory_map(s->as, tbl_addr, &cmd_len, 1144 DMA_DIRECTION_FROM_DEVICE); 1145 if (!cmd_fis) { 1146 DPRINTF(port, "error: guest passed us an invalid cmd fis\n"); 1147 return -1; 1148 } else if (cmd_len != 0x80) { 1149 ahci_trigger_irq(s, &s->dev[port], PORT_IRQ_HBUS_ERR); 1150 DPRINTF(port, "error: dma_memory_map failed: " 1151 "(len(%02"PRIx64") != 0x80)\n", 1152 cmd_len); 1153 goto out; 1154 } 1155 debug_print_fis(cmd_fis, 0x80); 1156 1157 switch (cmd_fis[0]) { 1158 case 
SATA_FIS_TYPE_REGISTER_H2D: 1159 handle_reg_h2d_fis(s, port, slot, cmd_fis); 1160 break; 1161 default: 1162 DPRINTF(port, "unknown command cmd_fis[0]=%02x cmd_fis[1]=%02x " 1163 "cmd_fis[2]=%02x\n", cmd_fis[0], cmd_fis[1], 1164 cmd_fis[2]); 1165 break; 1166 } 1167 1168 out: 1169 dma_memory_unmap(s->as, cmd_fis, cmd_len, DMA_DIRECTION_FROM_DEVICE, 1170 cmd_len); 1171 1172 if (s->dev[port].port.ifs[0].status & (BUSY_STAT|DRQ_STAT)) { 1173 /* async command, complete later */ 1174 s->dev[port].busy_slot = slot; 1175 return -1; 1176 } 1177 1178 /* done handling the command */ 1179 return 0; 1180 } 1181 1182 /* DMA dev <-> ram */ 1183 static void ahci_start_transfer(IDEDMA *dma) 1184 { 1185 AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma); 1186 IDEState *s = &ad->port.ifs[0]; 1187 uint32_t size = (uint32_t)(s->data_end - s->data_ptr); 1188 /* write == ram -> device */ 1189 uint32_t opts = le32_to_cpu(ad->cur_cmd->opts); 1190 int is_write = opts & AHCI_CMD_WRITE; 1191 int is_atapi = opts & AHCI_CMD_ATAPI; 1192 int has_sglist = 0; 1193 1194 if (is_atapi && !ad->done_atapi_packet) { 1195 /* already prepopulated iobuffer */ 1196 ad->done_atapi_packet = true; 1197 size = 0; 1198 goto out; 1199 } 1200 1201 if (ahci_dma_prepare_buf(dma, is_write)) { 1202 has_sglist = 1; 1203 } 1204 1205 DPRINTF(ad->port_no, "%sing %d bytes on %s w/%s sglist\n", 1206 is_write ? "writ" : "read", size, is_atapi ? "atapi" : "ata", 1207 has_sglist ? 
"" : "o"); 1208 1209 if (has_sglist && size) { 1210 if (is_write) { 1211 dma_buf_write(s->data_ptr, size, &s->sg); 1212 } else { 1213 dma_buf_read(s->data_ptr, size, &s->sg); 1214 } 1215 } 1216 1217 out: 1218 /* declare that we processed everything */ 1219 s->data_ptr = s->data_end; 1220 1221 /* Update number of transferred bytes, destroy sglist */ 1222 ahci_commit_buf(dma, size); 1223 1224 s->end_transfer_func(s); 1225 1226 if (!(s->status & DRQ_STAT)) { 1227 /* done with PIO send/receive */ 1228 ahci_write_fis_pio(ad, le32_to_cpu(ad->cur_cmd->status)); 1229 } 1230 } 1231 1232 static void ahci_start_dma(IDEDMA *dma, IDEState *s, 1233 BlockCompletionFunc *dma_cb) 1234 { 1235 AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma); 1236 DPRINTF(ad->port_no, "\n"); 1237 s->io_buffer_offset = 0; 1238 dma_cb(s, 0); 1239 } 1240 1241 static void ahci_restart_dma(IDEDMA *dma) 1242 { 1243 /* Nothing to do, ahci_start_dma already resets s->io_buffer_offset. */ 1244 } 1245 1246 /** 1247 * Called in DMA R/W chains to read the PRDT, utilizing ahci_populate_sglist. 1248 * Not currently invoked by PIO R/W chains, 1249 * which invoke ahci_populate_sglist via ahci_start_transfer. 1250 */ 1251 static int32_t ahci_dma_prepare_buf(IDEDMA *dma, int is_write) 1252 { 1253 AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma); 1254 IDEState *s = &ad->port.ifs[0]; 1255 1256 if (ahci_populate_sglist(ad, &s->sg, s->io_buffer_offset) == -1) { 1257 DPRINTF(ad->port_no, "ahci_dma_prepare_buf failed.\n"); 1258 return -1; 1259 } 1260 s->io_buffer_size = s->sg.size; 1261 1262 DPRINTF(ad->port_no, "len=%#x\n", s->io_buffer_size); 1263 return s->io_buffer_size; 1264 } 1265 1266 /** 1267 * Destroys the scatter-gather list, 1268 * and updates the command header with a bytes-read value. 1269 * called explicitly via ahci_dma_rw_buf (ATAPI DMA), 1270 * and ahci_start_transfer (PIO R/W), 1271 * and called via callback from ide_dma_cb for DMA R/W paths. 
1272 */ 1273 static void ahci_commit_buf(IDEDMA *dma, uint32_t tx_bytes) 1274 { 1275 AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma); 1276 IDEState *s = &ad->port.ifs[0]; 1277 1278 tx_bytes += le32_to_cpu(ad->cur_cmd->status); 1279 ad->cur_cmd->status = cpu_to_le32(tx_bytes); 1280 1281 qemu_sglist_destroy(&s->sg); 1282 } 1283 1284 static int ahci_dma_rw_buf(IDEDMA *dma, int is_write) 1285 { 1286 AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma); 1287 IDEState *s = &ad->port.ifs[0]; 1288 uint8_t *p = s->io_buffer + s->io_buffer_index; 1289 int l = s->io_buffer_size - s->io_buffer_index; 1290 1291 if (ahci_populate_sglist(ad, &s->sg, s->io_buffer_offset)) { 1292 return 0; 1293 } 1294 1295 if (is_write) { 1296 dma_buf_read(p, l, &s->sg); 1297 } else { 1298 dma_buf_write(p, l, &s->sg); 1299 } 1300 1301 /* free sglist, update byte count */ 1302 ahci_commit_buf(dma, l); 1303 1304 s->io_buffer_index += l; 1305 s->io_buffer_offset += l; 1306 1307 DPRINTF(ad->port_no, "len=%#x\n", l); 1308 1309 return 1; 1310 } 1311 1312 static void ahci_cmd_done(IDEDMA *dma) 1313 { 1314 AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma); 1315 1316 DPRINTF(ad->port_no, "cmd done\n"); 1317 1318 /* update d2h status */ 1319 ahci_write_fis_d2h(ad, NULL); 1320 1321 if (!ad->check_bh) { 1322 /* maybe we still have something to process, check later */ 1323 ad->check_bh = qemu_bh_new(ahci_check_cmd_bh, ad); 1324 qemu_bh_schedule(ad->check_bh); 1325 } 1326 } 1327 1328 static void ahci_irq_set(void *opaque, int n, int level) 1329 { 1330 } 1331 1332 static const IDEDMAOps ahci_dma_ops = { 1333 .start_dma = ahci_start_dma, 1334 .restart_dma = ahci_restart_dma, 1335 .start_transfer = ahci_start_transfer, 1336 .prepare_buf = ahci_dma_prepare_buf, 1337 .commit_buf = ahci_commit_buf, 1338 .rw_buf = ahci_dma_rw_buf, 1339 .cmd_done = ahci_cmd_done, 1340 }; 1341 1342 void ahci_init(AHCIState *s, DeviceState *qdev, AddressSpace *as, int ports) 1343 { 1344 qemu_irq *irqs; 1345 int i; 1346 1347 s->as = as; 1348 
    s->ports = ports;
    s->dev = g_new0(AHCIDevice, ports);
    ahci_reg_init(s);
    /* XXX BAR size should be 1k, but that breaks, so bump it to 4k for now */
    memory_region_init_io(&s->mem, OBJECT(qdev), &ahci_mem_ops, s,
                          "ahci", AHCI_MEM_BAR_SIZE);
    memory_region_init_io(&s->idp, OBJECT(qdev), &ahci_idp_ops, s,
                          "ahci-idp", 32);

    /* One (no-op) IRQ line per port; see ahci_irq_set. */
    irqs = qemu_allocate_irqs(ahci_irq_set, s, s->ports);

    for (i = 0; i < s->ports; i++) {
        AHCIDevice *ad = &s->dev[i];

        /* One IDE bus with a single drive slot per AHCI port. */
        ide_bus_new(&ad->port, sizeof(ad->port), qdev, i, 1);
        ide_init2(&ad->port, irqs[i]);

        ad->hba = s;
        ad->port_no = i;
        ad->port.dma = &ad->dma;
        ad->port.dma->ops = &ahci_dma_ops;
        ide_register_restart_cb(&ad->port);
    }
}

/* Free the per-port device array allocated by ahci_init(). */
void ahci_uninit(AHCIState *s)
{
    g_free(s->dev);
}

/* Full HBA reset: clear global interrupt status, re-enable AHCI mode,
 * and reset every port. */
void ahci_reset(AHCIState *s)
{
    AHCIPortRegs *pr;
    int i;

    s->control_regs.irqstatus = 0;
    /* AHCI Enable (AE)
     * The implementation of this bit is dependent upon the value of the
     * CAP.SAM bit. If CAP.SAM is '0', then GHC.AE shall be read-write and
     * shall have a reset value of '0'. If CAP.SAM is '1', then AE shall be
     * read-only and shall have a reset value of '1'.
     *
     * We set HOST_CAP_AHCI so we must enable AHCI at reset.
     */
    s->control_regs.ghc = HOST_CTL_AHCI_EN;

    for (i = 0; i < s->ports; i++) {
        pr = &s->dev[i].port_regs;
        pr->irq_stat = 0;
        pr->irq_mask = 0;
        pr->scr_ctl = 0;
        pr->cmd = PORT_CMD_SPIN_UP | PORT_CMD_POWER_ON;
        ahci_reset_port(s, i);
    }
}

/* Migration state for a single AHCI port (IDE bus + drive + port regs). */
static const VMStateDescription vmstate_ahci_device = {
    .name = "ahci port",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_IDE_BUS(port, AHCIDevice),
        VMSTATE_IDE_DRIVE(port.ifs[0], AHCIDevice),
        VMSTATE_UINT32(port_state, AHCIDevice),
        VMSTATE_UINT32(finished, AHCIDevice),
        VMSTATE_UINT32(port_regs.lst_addr, AHCIDevice),
        VMSTATE_UINT32(port_regs.lst_addr_hi, AHCIDevice),
        VMSTATE_UINT32(port_regs.fis_addr, AHCIDevice),
        VMSTATE_UINT32(port_regs.fis_addr_hi, AHCIDevice),
        VMSTATE_UINT32(port_regs.irq_stat, AHCIDevice),
        VMSTATE_UINT32(port_regs.irq_mask, AHCIDevice),
        VMSTATE_UINT32(port_regs.cmd, AHCIDevice),
        VMSTATE_UINT32(port_regs.tfdata, AHCIDevice),
        VMSTATE_UINT32(port_regs.sig, AHCIDevice),
        VMSTATE_UINT32(port_regs.scr_stat, AHCIDevice),
        VMSTATE_UINT32(port_regs.scr_ctl, AHCIDevice),
        VMSTATE_UINT32(port_regs.scr_err, AHCIDevice),
        VMSTATE_UINT32(port_regs.scr_act, AHCIDevice),
        VMSTATE_UINT32(port_regs.cmd_issue, AHCIDevice),
        VMSTATE_BOOL(done_atapi_packet, AHCIDevice),
        VMSTATE_INT32(busy_slot, AHCIDevice),
        VMSTATE_BOOL(init_d2h_sent, AHCIDevice),
        VMSTATE_END_OF_LIST()
    },
};

/**
 * Post-load hook: re-map guest CLB/FIS buffers and resume or re-check
 * command processing on every port. Returns -1 (fail migration) on any
 * inconsistent incoming state.
 */
static int ahci_state_post_load(void *opaque, int version_id)
{
    int i;
    struct AHCIDevice *ad;
    AHCIState *s = opaque;

    for (i = 0; i < s->ports; i++) {
        ad = &s->dev[i];

        /* Only remap the CLB address if appropriate, disallowing a state
         * transition from 'on' to 'off' it should be consistent here. */
        if (ahci_cond_start_engines(ad, false) != 0) {
            return -1;
        }

        /*
         * If an error is present, ad->busy_slot will be valid and not -1.
         * In this case, an operation is waiting to resume and will re-check
         * for additional AHCI commands to execute upon completion.
         *
         * In the case where no error was present, busy_slot will be -1,
         * and we should check to see if there are additional commands waiting.
         */
        if (ad->busy_slot == -1) {
            check_cmd(s, i);
        } else {
            /* We are in the middle of a command, and may need to access
             * the command header in guest memory again. */
            if (ad->busy_slot < 0 || ad->busy_slot >= AHCI_MAX_CMDS) {
                /* Untrusted incoming slot index: reject the migration. */
                return -1;
            }
            ad->cur_cmd = &((AHCICmdHdr *)ad->lst)[ad->busy_slot];
        }
    }

    return 0;
}

/* Top-level AHCI migration description; 'ports' is serialized and must
 * match the destination (VMSTATE_INT32_EQUAL). */
const VMStateDescription vmstate_ahci = {
    .name = "ahci",
    .version_id = 1,
    .post_load = ahci_state_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_VARRAY_POINTER_INT32(dev, AHCIState, ports,
                                            vmstate_ahci_device, AHCIDevice),
        VMSTATE_UINT32(control_regs.cap, AHCIState),
        VMSTATE_UINT32(control_regs.ghc, AHCIState),
        VMSTATE_UINT32(control_regs.irqstatus, AHCIState),
        VMSTATE_UINT32(control_regs.impl, AHCIState),
        VMSTATE_UINT32(control_regs.version, AHCIState),
        VMSTATE_UINT32(idp_index, AHCIState),
        VMSTATE_INT32_EQUAL(ports, AHCIState),
        VMSTATE_END_OF_LIST()
    },
};

#define TYPE_SYSBUS_AHCI "sysbus-ahci"
#define SYSBUS_AHCI(obj) OBJECT_CHECK(SysbusAHCIState, (obj), TYPE_SYSBUS_AHCI)

/* Sysbus (non-PCI) wrapper device around the common AHCIState. */
typedef struct SysbusAHCIState {
    /*< private >*/
    SysBusDevice parent_obj;
    /*< public >*/

    AHCIState ahci;
    uint32_t num_ports;  /* set via the "num-ports" property */
} SysbusAHCIState;

static const VMStateDescription vmstate_sysbus_ahci = {
    .name = "sysbus-ahci",
    .fields = (VMStateField[]) {
        VMSTATE_AHCI(ahci, SysbusAHCIState),
VMSTATE_END_OF_LIST() 1506 }, 1507 }; 1508 1509 static void sysbus_ahci_reset(DeviceState *dev) 1510 { 1511 SysbusAHCIState *s = SYSBUS_AHCI(dev); 1512 1513 ahci_reset(&s->ahci); 1514 } 1515 1516 static void sysbus_ahci_realize(DeviceState *dev, Error **errp) 1517 { 1518 SysBusDevice *sbd = SYS_BUS_DEVICE(dev); 1519 SysbusAHCIState *s = SYSBUS_AHCI(dev); 1520 1521 ahci_init(&s->ahci, dev, &address_space_memory, s->num_ports); 1522 1523 sysbus_init_mmio(sbd, &s->ahci.mem); 1524 sysbus_init_irq(sbd, &s->ahci.irq); 1525 } 1526 1527 static Property sysbus_ahci_properties[] = { 1528 DEFINE_PROP_UINT32("num-ports", SysbusAHCIState, num_ports, 1), 1529 DEFINE_PROP_END_OF_LIST(), 1530 }; 1531 1532 static void sysbus_ahci_class_init(ObjectClass *klass, void *data) 1533 { 1534 DeviceClass *dc = DEVICE_CLASS(klass); 1535 1536 dc->realize = sysbus_ahci_realize; 1537 dc->vmsd = &vmstate_sysbus_ahci; 1538 dc->props = sysbus_ahci_properties; 1539 dc->reset = sysbus_ahci_reset; 1540 set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); 1541 } 1542 1543 static const TypeInfo sysbus_ahci_info = { 1544 .name = TYPE_SYSBUS_AHCI, 1545 .parent = TYPE_SYS_BUS_DEVICE, 1546 .instance_size = sizeof(SysbusAHCIState), 1547 .class_init = sysbus_ahci_class_init, 1548 }; 1549 1550 static void sysbus_ahci_register_types(void) 1551 { 1552 type_register_static(&sysbus_ahci_info); 1553 } 1554 1555 type_init(sysbus_ahci_register_types) 1556 1557 void ahci_ide_create_devs(PCIDevice *dev, DriveInfo **hd) 1558 { 1559 AHCIPCIState *d = ICH_AHCI(dev); 1560 AHCIState *ahci = &d->ahci; 1561 int i; 1562 1563 for (i = 0; i < ahci->ports; i++) { 1564 if (hd[i] == NULL) { 1565 continue; 1566 } 1567 ide_create_drive(&ahci->dev[i].port, 0, hd[i]); 1568 } 1569 1570 } 1571