/*
 * QEMU ESP/NCR53C9x emulation
 *
 * Copyright (c) 2005-2006 Fabrice Bellard
 * Copyright (c) 2012 Herve Poussineau
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "hw/sysbus.h"
#include "migration/vmstate.h"
#include "hw/irq.h"
#include "hw/scsi/esp.h"
#include "trace.h"
#include "qemu/log.h"
#include "qemu/module.h"

/*
 * On Sparc32, this is the ESP (NCR53C90) part of chip STP2000 (Master I/O),
 * also produced as NCR89C100. See
 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR89C100.txt
 * and
 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR53C9X.txt
 *
 * On Macintosh Quadra it is a NCR53C96.
 */

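/*
 * Note: the level of the INT pin is mirrored in the STAT_INT bit of
 * ESP_RSTAT, so the helpers below keep the register view and the IRQ line
 * in sync and avoid redundant edge transitions.
 */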
static void esp_raise_irq(ESPState *s)
{
    if (!(s->rregs[ESP_RSTAT] & STAT_INT)) {
        s->rregs[ESP_RSTAT] |= STAT_INT;
        qemu_irq_raise(s->irq);
        trace_esp_raise_irq();
    }
}

static void esp_lower_irq(ESPState *s)
{
    if (s->rregs[ESP_RSTAT] & STAT_INT) {
        s->rregs[ESP_RSTAT] &= ~STAT_INT;
        qemu_irq_lower(s->irq);
        trace_esp_lower_irq();
    }
}

static void esp_raise_drq(ESPState *s)
{
    qemu_irq_raise(s->irq_data);
    trace_esp_raise_drq();
}

static void esp_lower_drq(ESPState *s)
{
    qemu_irq_lower(s->irq_data);
    trace_esp_lower_drq();
}

void esp_dma_enable(ESPState *s, int irq, int level)
{
    if (level) {
        s->dma_enabled = 1;
        trace_esp_dma_enable();
        if (s->dma_cb) {
            s->dma_cb(s);
            s->dma_cb = NULL;
        }
    } else {
        trace_esp_dma_disable();
        s->dma_enabled = 0;
    }
}

void esp_request_cancelled(SCSIRequest *req)
{
    ESPState *s = req->hba_private;

    if (req == s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
        s->async_len = 0;
    }
}

static void esp_fifo_push(Fifo8 *fifo, uint8_t val)
{
    if (fifo8_num_used(fifo) == fifo->capacity) {
        trace_esp_error_fifo_overrun();
        return;
    }

    fifo8_push(fifo, val);
}

static uint8_t esp_fifo_pop(Fifo8 *fifo)
{
    if (fifo8_is_empty(fifo)) {
        return 0;
    }

    return fifo8_pop(fifo);
}

static uint32_t esp_fifo_pop_buf(Fifo8 *fifo, uint8_t *dest, int maxlen)
{
    const uint8_t *buf;
    uint32_t n;

    if (maxlen == 0) {
        return 0;
    }

    buf = fifo8_pop_buf(fifo, maxlen, &n);
    if (dest) {
        memcpy(dest, buf, n);
    }

    return n;
}

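/*
 * The transfer counter (TC) is a 24-bit value spread across the TCLO, TCMID
 * and TCHI registers. The rregs copy is the live counter, while the wregs
 * copy (the "start" count, STC) holds the value programmed by the guest and
 * is reloaded into TC whenever a command with CMD_DMA set is issued; a
 * programmed count of zero is treated as the maximum (0x10000).
 */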
static uint32_t esp_get_tc(ESPState *s)
{
    uint32_t dmalen;

    dmalen = s->rregs[ESP_TCLO];
    dmalen |= s->rregs[ESP_TCMID] << 8;
    dmalen |= s->rregs[ESP_TCHI] << 16;

    return dmalen;
}

static void esp_set_tc(ESPState *s, uint32_t dmalen)
{
    s->rregs[ESP_TCLO] = dmalen;
    s->rregs[ESP_TCMID] = dmalen >> 8;
    s->rregs[ESP_TCHI] = dmalen >> 16;
}

static uint32_t esp_get_stc(ESPState *s)
{
    uint32_t dmalen;

    dmalen = s->wregs[ESP_TCLO];
    dmalen |= s->wregs[ESP_TCMID] << 8;
    dmalen |= s->wregs[ESP_TCHI] << 16;

    return dmalen;
}

static uint8_t esp_pdma_read(ESPState *s)
{
    uint8_t val;

    if (s->do_cmd) {
        val = esp_fifo_pop(&s->cmdfifo);
    } else {
        val = esp_fifo_pop(&s->fifo);
    }

    return val;
}

static void esp_pdma_write(ESPState *s, uint8_t val)
{
    uint32_t dmalen = esp_get_tc(s);

    if (dmalen == 0) {
        return;
    }

    if (s->do_cmd) {
        esp_fifo_push(&s->cmdfifo, val);
    } else {
        esp_fifo_push(&s->fifo, val);
    }

    dmalen--;
    esp_set_tc(s, dmalen);
}

static int esp_select(ESPState *s)
{
    int target;

    target = s->wregs[ESP_WBUSID] & BUSID_DID;

    s->ti_size = 0;
    fifo8_reset(&s->fifo);

    s->current_dev = scsi_device_find(&s->bus, 0, target, 0);
    if (!s->current_dev) {
        /* No such drive */
        s->rregs[ESP_RSTAT] = 0;
        s->rregs[ESP_RINTR] = INTR_DC;
        s->rregs[ESP_RSEQ] = SEQ_0;
        esp_raise_irq(s);
        return -1;
    }

    /*
     * Note that we deliberately don't raise the IRQ here: this will be done
     * either in do_command_phase() for DATA OUT transfers or by the deferred
     * IRQ mechanism in esp_transfer_data() for DATA IN transfers
     */
    s->rregs[ESP_RINTR] |= INTR_FC;
    s->rregs[ESP_RSEQ] = SEQ_CD;
    return 0;
}

static uint32_t get_cmd(ESPState *s, uint32_t maxlen)
{
    uint8_t buf[ESP_CMDFIFO_SZ];
    uint32_t dmalen, n;
    int target;

    if (s->current_req) {
        /* Started a new command before the old one finished. Cancel it. */
        scsi_req_cancel(s->current_req);
    }

    target = s->wregs[ESP_WBUSID] & BUSID_DID;
    if (s->dma) {
        dmalen = MIN(esp_get_tc(s), maxlen);
        if (dmalen == 0) {
            return 0;
        }
        if (s->dma_memory_read) {
            s->dma_memory_read(s->dma_opaque, buf, dmalen);
            dmalen = MIN(fifo8_num_free(&s->cmdfifo), dmalen);
            fifo8_push_all(&s->cmdfifo, buf, dmalen);
        } else {
            if (esp_select(s) < 0) {
                fifo8_reset(&s->cmdfifo);
                return -1;
            }
            esp_raise_drq(s);
            fifo8_reset(&s->cmdfifo);
            return 0;
        }
    } else {
        dmalen = MIN(fifo8_num_used(&s->fifo), maxlen);
        if (dmalen == 0) {
            return 0;
        }
        n = esp_fifo_pop_buf(&s->fifo, buf, dmalen);
        n = MIN(fifo8_num_free(&s->cmdfifo), n);
        fifo8_push_all(&s->cmdfifo, buf, n);
    }
    trace_esp_get_cmd(dmalen, target);

    if (esp_select(s) < 0) {
        fifo8_reset(&s->cmdfifo);
        return -1;
    }
    return dmalen;
}

static void do_command_phase(ESPState *s)
{
    uint32_t cmdlen;
    int32_t datalen;
    SCSIDevice *current_lun;
    uint8_t buf[ESP_CMDFIFO_SZ];

    trace_esp_do_command_phase(s->lun);
    cmdlen = fifo8_num_used(&s->cmdfifo);
    if (!cmdlen || !s->current_dev) {
        return;
    }
    esp_fifo_pop_buf(&s->cmdfifo, buf, cmdlen);

    current_lun = scsi_device_find(&s->bus, 0, s->current_dev->id, s->lun);
    s->current_req = scsi_req_new(current_lun, 0, s->lun, buf, s);
    datalen = scsi_req_enqueue(s->current_req);
    s->ti_size = datalen;
    fifo8_reset(&s->cmdfifo);
    if (datalen != 0) {
        s->rregs[ESP_RSTAT] = STAT_TC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        s->ti_cmd = 0;
        esp_set_tc(s, 0);
        if (datalen > 0) {
            /*
             * Switch to DATA IN phase but wait until initial data xfer is
             * complete before raising the command completion interrupt
             */
            s->data_in_ready = false;
            s->rregs[ESP_RSTAT] |= STAT_DI;
        } else {
            s->rregs[ESP_RSTAT] |= STAT_DO;
            s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
            esp_raise_irq(s);
            esp_lower_drq(s);
        }
        scsi_req_continue(s->current_req);
        return;
    }
}

static void do_message_phase(ESPState *s)
{
    if (s->cmdfifo_cdb_offset) {
        uint8_t message = esp_fifo_pop(&s->cmdfifo);

        trace_esp_do_identify(message);
        s->lun = message & 7;
        s->cmdfifo_cdb_offset--;
    }

    /* Ignore extended messages for now */
    if (s->cmdfifo_cdb_offset) {
        int len = MIN(s->cmdfifo_cdb_offset, fifo8_num_used(&s->cmdfifo));
        esp_fifo_pop_buf(&s->cmdfifo, NULL, len);
        s->cmdfifo_cdb_offset = 0;
    }
}

static void do_cmd(ESPState *s)
{
    do_message_phase(s);
    assert(s->cmdfifo_cdb_offset == 0);
    do_command_phase(s);
}

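/*
 * Selection command handlers. cmdfifo accumulates any message out bytes
 * followed by the CDB, with cmdfifo_cdb_offset recording where the CDB
 * starts. Each handler has a *_pdma_cb counterpart that is invoked from the
 * PDMA memory region handlers once the host CPU has pushed the outstanding
 * bytes itself.
 */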
static void satn_pdma_cb(ESPState *s)
{
    if (!esp_get_tc(s) && !fifo8_is_empty(&s->cmdfifo)) {
        s->cmdfifo_cdb_offset = 1;
        s->do_cmd = 0;
        do_cmd(s);
    }
}

static void handle_satn(ESPState *s)
{
    int32_t cmdlen;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_satn;
        return;
    }
    s->pdma_cb = satn_pdma_cb;
    cmdlen = get_cmd(s, ESP_CMDFIFO_SZ);
    if (cmdlen > 0) {
        s->cmdfifo_cdb_offset = 1;
        s->do_cmd = 0;
        do_cmd(s);
    } else if (cmdlen == 0) {
        s->do_cmd = 1;
        /* Target present, but no cmd yet - switch to command phase */
        s->rregs[ESP_RSEQ] = SEQ_CD;
        s->rregs[ESP_RSTAT] = STAT_CD;
    }
}

static void s_without_satn_pdma_cb(ESPState *s)
{
    if (!esp_get_tc(s) && !fifo8_is_empty(&s->cmdfifo)) {
        s->cmdfifo_cdb_offset = 0;
        s->do_cmd = 0;
        do_cmd(s);
    }
}

static void handle_s_without_atn(ESPState *s)
{
    int32_t cmdlen;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_s_without_atn;
        return;
    }
    s->pdma_cb = s_without_satn_pdma_cb;
    cmdlen = get_cmd(s, ESP_CMDFIFO_SZ);
    if (cmdlen > 0) {
        s->cmdfifo_cdb_offset = 0;
        s->do_cmd = 0;
        do_cmd(s);
    } else if (cmdlen == 0) {
        s->do_cmd = 1;
        /* Target present, but no cmd yet - switch to command phase */
        s->rregs[ESP_RSEQ] = SEQ_CD;
        s->rregs[ESP_RSTAT] = STAT_CD;
    }
}

static void satn_stop_pdma_cb(ESPState *s)
{
    if (!esp_get_tc(s) && !fifo8_is_empty(&s->cmdfifo)) {
        trace_esp_handle_satn_stop(fifo8_num_used(&s->cmdfifo));
        s->do_cmd = 1;
        s->cmdfifo_cdb_offset = 1;
        s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
        s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        esp_raise_irq(s);
    }
}

static void handle_satn_stop(ESPState *s)
{
    int32_t cmdlen;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_satn_stop;
        return;
    }
    s->pdma_cb = satn_stop_pdma_cb;
    cmdlen = get_cmd(s, 1);
    if (cmdlen > 0) {
        trace_esp_handle_satn_stop(fifo8_num_used(&s->cmdfifo));
        s->do_cmd = 1;
        s->cmdfifo_cdb_offset = 1;
        s->rregs[ESP_RSTAT] = STAT_MO;
        s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_MO;
        esp_raise_irq(s);
    } else if (cmdlen == 0) {
        s->do_cmd = 1;
        /* Target present, switch to message out phase */
        s->rregs[ESP_RSEQ] = SEQ_MO;
        s->rregs[ESP_RSTAT] = STAT_MO;
    }
}

static void write_response_pdma_cb(ESPState *s)
{
    s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
    s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
    s->rregs[ESP_RSEQ] = SEQ_CD;
    esp_raise_irq(s);
}

static void write_response(ESPState *s)
{
    uint8_t buf[2];

    trace_esp_write_response(s->status);

    buf[0] = s->status;
    buf[1] = 0;

    if (s->dma) {
        if (s->dma_memory_write) {
            s->dma_memory_write(s->dma_opaque, buf, 2);
            s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
            s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
            s->rregs[ESP_RSEQ] = SEQ_CD;
        } else {
            s->pdma_cb = write_response_pdma_cb;
            esp_raise_drq(s);
            return;
        }
    } else {
        fifo8_reset(&s->fifo);
        fifo8_push_all(&s->fifo, buf, 2);
        s->rregs[ESP_RFLAGS] = 2;
    }
    esp_raise_irq(s);
}

static void esp_dma_done(ESPState *s)
{
    s->rregs[ESP_RSTAT] |= STAT_TC;
    s->rregs[ESP_RINTR] |= INTR_BS;
    s->rregs[ESP_RFLAGS] = 0;
    esp_set_tc(s, 0);
    esp_raise_irq(s);
}

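/*
 * PDMA completion callback used for TI (Transfer Information) commands:
 * invoked from the PDMA region handlers once the host CPU has transferred
 * bytes itself, it moves data between the FIFO/cmdfifo and the SCSI layer's
 * async buffer.
 */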
static void do_dma_pdma_cb(ESPState *s)
{
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
    int len;
    uint32_t n;

    if (s->do_cmd) {
        /* Ensure we have received complete command after SATN and stop */
        if (esp_get_tc(s) || fifo8_is_empty(&s->cmdfifo)) {
            return;
        }

        s->ti_size = 0;
        if ((s->rregs[ESP_RSTAT] & 7) == STAT_CD) {
            /* No command received */
            if (s->cmdfifo_cdb_offset == fifo8_num_used(&s->cmdfifo)) {
                return;
            }

            /* Command has been received */
            s->do_cmd = 0;
            do_cmd(s);
        } else {
            /*
             * Extra message out bytes received: update cmdfifo_cdb_offset
             * and then switch to command phase
             */
            s->cmdfifo_cdb_offset = fifo8_num_used(&s->cmdfifo);
            s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
            s->rregs[ESP_RSEQ] = SEQ_CD;
            s->rregs[ESP_RINTR] |= INTR_BS;
            esp_raise_irq(s);
        }
        return;
    }

    if (!s->current_req) {
        return;
    }

    if (to_device) {
        /* Copy FIFO data to device */
        len = MIN(s->async_len, ESP_FIFO_SZ);
        len = MIN(len, fifo8_num_used(&s->fifo));
        n = esp_fifo_pop_buf(&s->fifo, s->async_buf, len);
        s->async_buf += n;
        s->async_len -= n;
        s->ti_size += n;

        if (n < len) {
            /* Unaligned accesses can cause FIFO wraparound */
            len = len - n;
            n = esp_fifo_pop_buf(&s->fifo, s->async_buf, len);
            s->async_buf += n;
            s->async_len -= n;
            s->ti_size += n;
        }

        if (s->async_len == 0) {
            scsi_req_continue(s->current_req);
            return;
        }

        if (esp_get_tc(s) == 0) {
            esp_lower_drq(s);
            esp_dma_done(s);
        }

        return;
    } else {
        if (s->async_len == 0) {
            /* Defer until the scsi layer has completed */
            scsi_req_continue(s->current_req);
            s->data_in_ready = false;
            return;
        }

        if (esp_get_tc(s) != 0) {
            /* Copy device data to FIFO */
            len = MIN(s->async_len, esp_get_tc(s));
            len = MIN(len, fifo8_num_free(&s->fifo));
            fifo8_push_all(&s->fifo, s->async_buf, len);
            s->async_buf += len;
            s->async_len -= len;
            s->ti_size -= len;
            esp_set_tc(s, esp_get_tc(s) - len);

            if (esp_get_tc(s) == 0) {
                /* Indicate transfer to FIFO is complete */
                s->rregs[ESP_RSTAT] |= STAT_TC;
            }
            return;
        }

        /* Partially filled a scsi buffer. Complete immediately. */
        esp_lower_drq(s);
        esp_dma_done(s);
    }
}

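/*
 * esp_do_dma() uses the board-supplied dma_memory_read/write callbacks when
 * they are provided; boards without them fall back to PDMA, raising DRQ and
 * letting the host CPU move the data itself through the PDMA region, with
 * do_dma_pdma_cb() picking up afterwards.
 */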
static void esp_do_dma(ESPState *s)
{
    uint32_t len, cmdlen;
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
    uint8_t buf[ESP_CMDFIFO_SZ];

    len = esp_get_tc(s);
    if (s->do_cmd) {
        /*
         * handle_ti() case: esp_do_dma() is only entered with do_cmd set
         * from handle_ti() while command bytes are still being transferred
         * (esp_transfer_data() asserts !do_cmd)
         */
        cmdlen = fifo8_num_used(&s->cmdfifo);
        trace_esp_do_dma(cmdlen, len);
        if (s->dma_memory_read) {
            len = MIN(len, fifo8_num_free(&s->cmdfifo));
            s->dma_memory_read(s->dma_opaque, buf, len);
            fifo8_push_all(&s->cmdfifo, buf, len);
        } else {
            s->pdma_cb = do_dma_pdma_cb;
            esp_raise_drq(s);
            return;
        }
        trace_esp_handle_ti_cmd(cmdlen);
        s->ti_size = 0;
        if ((s->rregs[ESP_RSTAT] & 7) == STAT_CD) {
            /* No command received */
            if (s->cmdfifo_cdb_offset == fifo8_num_used(&s->cmdfifo)) {
                return;
            }

            /* Command has been received */
            s->do_cmd = 0;
            do_cmd(s);
        } else {
            /*
             * Extra message out bytes received: update cmdfifo_cdb_offset
             * and then switch to command phase
             */
            s->cmdfifo_cdb_offset = fifo8_num_used(&s->cmdfifo);
            s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
            s->rregs[ESP_RSEQ] = SEQ_CD;
            s->rregs[ESP_RINTR] |= INTR_BS;
            esp_raise_irq(s);
        }
        return;
    }
    if (!s->current_req) {
        return;
    }
    if (s->async_len == 0) {
        /* Defer until data is available. */
        return;
    }
    if (len > s->async_len) {
        len = s->async_len;
    }
    if (to_device) {
        if (s->dma_memory_read) {
            s->dma_memory_read(s->dma_opaque, s->async_buf, len);
        } else {
            s->pdma_cb = do_dma_pdma_cb;
            esp_raise_drq(s);
            return;
        }
    } else {
        if (s->dma_memory_write) {
            s->dma_memory_write(s->dma_opaque, s->async_buf, len);
        } else {
            /* Adjust TC for any leftover data in the FIFO */
            if (!fifo8_is_empty(&s->fifo)) {
                esp_set_tc(s, esp_get_tc(s) - fifo8_num_used(&s->fifo));
            }

            /* Copy device data to FIFO */
            len = MIN(len, fifo8_num_free(&s->fifo));
            fifo8_push_all(&s->fifo, s->async_buf, len);
            s->async_buf += len;
            s->async_len -= len;
            s->ti_size -= len;

            /*
             * MacOS toolbox uses a TI length of 16 bytes for all commands, so
             * commands shorter than this must be padded accordingly
             */
            if (len < esp_get_tc(s) && esp_get_tc(s) <= ESP_FIFO_SZ) {
                while (fifo8_num_used(&s->fifo) < ESP_FIFO_SZ) {
                    esp_fifo_push(&s->fifo, 0);
                    len++;
                }
            }

            esp_set_tc(s, esp_get_tc(s) - len);
            s->pdma_cb = do_dma_pdma_cb;
            esp_raise_drq(s);

            /* Indicate transfer to FIFO is complete */
            s->rregs[ESP_RSTAT] |= STAT_TC;
            return;
        }
    }
    esp_set_tc(s, esp_get_tc(s) - len);
    s->async_buf += len;
    s->async_len -= len;
    if (to_device) {
        s->ti_size += len;
    } else {
        s->ti_size -= len;
    }
    if (s->async_len == 0) {
        scsi_req_continue(s->current_req);
        /*
         * If there is still data to be read from the device then
         * complete the DMA operation immediately. Otherwise defer
         * until the scsi layer has completed.
         */
        if (to_device || esp_get_tc(s) != 0 || s->ti_size == 0) {
            return;
        }
    }

    /* Partially filled a scsi buffer. Complete immediately. */
    esp_dma_done(s);
    esp_lower_drq(s);
}

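/*
 * Non-DMA (programmed I/O) transfers: moves up to one FIFO's worth of data
 * per call for DATA OUT and refills the FIFO a byte at a time for DATA IN,
 * raising INTR_BS whenever more data remains so the guest can drain or
 * refill the FIFO.
 */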
static void esp_do_nodma(ESPState *s)
{
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
    uint32_t cmdlen;
    int len;

    if (s->do_cmd) {
        cmdlen = fifo8_num_used(&s->cmdfifo);
        trace_esp_handle_ti_cmd(cmdlen);
        s->ti_size = 0;
        if ((s->rregs[ESP_RSTAT] & 7) == STAT_CD) {
            /* No command received */
            if (s->cmdfifo_cdb_offset == fifo8_num_used(&s->cmdfifo)) {
                return;
            }

            /* Command has been received */
            s->do_cmd = 0;
            do_cmd(s);
        } else {
            /*
             * Extra message out bytes received: update cmdfifo_cdb_offset
             * and then switch to command phase
             */
            s->cmdfifo_cdb_offset = fifo8_num_used(&s->cmdfifo);
            s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
            s->rregs[ESP_RSEQ] = SEQ_CD;
            s->rregs[ESP_RINTR] |= INTR_BS;
            esp_raise_irq(s);
        }
        return;
    }

    if (!s->current_req) {
        return;
    }

    if (s->async_len == 0) {
        /* Defer until data is available. */
        return;
    }

    if (to_device) {
        len = MIN(fifo8_num_used(&s->fifo), ESP_FIFO_SZ);
        esp_fifo_pop_buf(&s->fifo, s->async_buf, len);
        s->async_buf += len;
        s->async_len -= len;
        s->ti_size += len;
    } else {
        if (fifo8_is_empty(&s->fifo)) {
            fifo8_push(&s->fifo, s->async_buf[0]);
            s->async_buf++;
            s->async_len--;
            s->ti_size--;
        }
    }

    if (s->async_len == 0) {
        scsi_req_continue(s->current_req);
        return;
    }

    s->rregs[ESP_RINTR] |= INTR_BS;
    esp_raise_irq(s);
}

void esp_command_complete(SCSIRequest *req, size_t resid)
{
    ESPState *s = req->hba_private;
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);

    trace_esp_command_complete();

    /*
     * Non-DMA transfers from the target will leave the last byte in
     * the FIFO so don't reset ti_size in this case
     */
    if (s->dma || to_device) {
        if (s->ti_size != 0) {
            trace_esp_command_complete_unexpected();
        }
        s->ti_size = 0;
    }

    s->async_len = 0;
    if (req->status) {
        trace_esp_command_complete_fail();
    }
    s->status = req->status;

    /*
     * If the transfer is finished, switch to status phase. For non-DMA
     * transfers from the target the last byte is still in the FIFO
     */
    if (s->ti_size == 0) {
        s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
        esp_dma_done(s);
        esp_lower_drq(s);
    }

    if (s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
    }
}

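/*
 * Callback from the SCSI layer when request data is available (DATA IN) or
 * more data is needed (DATA OUT). For DATA IN the command completion
 * interrupt is deferred until the first buffer is ready, tracked via
 * data_in_ready.
 */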
void esp_transfer_data(SCSIRequest *req, uint32_t len)
{
    ESPState *s = req->hba_private;
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
    uint32_t dmalen = esp_get_tc(s);

    assert(!s->do_cmd);
    trace_esp_transfer_data(dmalen, s->ti_size);
    s->async_len = len;
    s->async_buf = scsi_req_get_buf(req);

    if (!to_device && !s->data_in_ready) {
        /*
         * Initial incoming data xfer is complete so raise command
         * completion interrupt
         */
        s->data_in_ready = true;
        s->rregs[ESP_RSTAT] |= STAT_TC;
        s->rregs[ESP_RINTR] |= INTR_BS;
        esp_raise_irq(s);
    }

    if (s->ti_cmd == 0) {
        /*
         * Always perform the initial transfer upon reception of the next TI
         * command to ensure the DMA/non-DMA status of the command is correct.
         * It is not possible to use s->dma directly in the section below as
         * some OSs send non-DMA NOP commands after a DMA transfer. Hence if the
         * async data transfer is delayed then s->dma is set incorrectly.
         */
        return;
    }

    if (s->ti_cmd == (CMD_TI | CMD_DMA)) {
        if (dmalen) {
            esp_do_dma(s);
        } else if (s->ti_size <= 0) {
            /*
             * If this was the last part of a DMA transfer then the
             * completion interrupt is deferred to here.
             */
            esp_dma_done(s);
            esp_lower_drq(s);
        }
    } else if (s->ti_cmd == CMD_TI) {
        esp_do_nodma(s);
    }
}

static void handle_ti(ESPState *s)
{
    uint32_t dmalen;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_ti;
        return;
    }

    s->ti_cmd = s->rregs[ESP_CMD];
    if (s->dma) {
        dmalen = esp_get_tc(s);
        trace_esp_handle_ti(dmalen);
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        esp_do_dma(s);
    } else {
        trace_esp_handle_ti(s->ti_size);
        esp_do_nodma(s);
    }
}

void esp_hard_reset(ESPState *s)
{
    memset(s->rregs, 0, ESP_REGS);
    memset(s->wregs, 0, ESP_REGS);
    s->tchi_written = 0;
    s->ti_size = 0;
    s->async_len = 0;
    fifo8_reset(&s->fifo);
    fifo8_reset(&s->cmdfifo);
    s->dma = 0;
    s->do_cmd = 0;
    s->dma_cb = NULL;

    s->rregs[ESP_CFG1] = 7;
}

static void esp_soft_reset(ESPState *s)
{
    qemu_irq_lower(s->irq);
    qemu_irq_lower(s->irq_data);
    esp_hard_reset(s);
}

static void parent_esp_reset(ESPState *s, int irq, int level)
{
    if (level) {
        esp_soft_reset(s);
    }
}

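/*
 * Register reads have side effects: reading ESP_FIFO pops a byte from the
 * FIFO, and reading ESP_RINTR returns and clears the pending interrupt bits
 * and lowers the IRQ line.
 */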
uint64_t esp_reg_read(ESPState *s, uint32_t saddr)
{
    uint32_t val;

    switch (saddr) {
    case ESP_FIFO:
        if (s->dma_memory_read && s->dma_memory_write &&
            (s->rregs[ESP_RSTAT] & STAT_PIO_MASK) == 0) {
            /* Data out. */
            qemu_log_mask(LOG_UNIMP, "esp: PIO data read not implemented\n");
            s->rregs[ESP_FIFO] = 0;
        } else {
            if ((s->rregs[ESP_RSTAT] & 0x7) == STAT_DI) {
                if (s->ti_size) {
                    esp_do_nodma(s);
                } else {
                    /*
                     * The last byte of a non-DMA transfer has been read out
                     * of the FIFO so switch to status phase
                     */
                    s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
                }
            }
            s->rregs[ESP_FIFO] = esp_fifo_pop(&s->fifo);
        }
        val = s->rregs[ESP_FIFO];
        break;
    case ESP_RINTR:
        /*
         * Clear sequence step, interrupt register and all status bits
         * except TC
         */
        val = s->rregs[ESP_RINTR];
        s->rregs[ESP_RINTR] = 0;
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        /*
         * According to the datasheet ESP_RSEQ should be cleared, but as the
         * emulation currently defers information transfers to the next TI
         * command leave it for now so that pedantic guests such as the old
         * Linux 2.6 driver see the correct flags before the next SCSI phase
         * transition.
         *
         * s->rregs[ESP_RSEQ] = SEQ_0;
         */
        esp_lower_irq(s);
        break;
    case ESP_TCHI:
        /* Return the unique id if the value has never been written */
        if (!s->tchi_written) {
            val = s->chip_id;
        } else {
            val = s->rregs[saddr];
        }
        break;
    case ESP_RFLAGS:
        /* Bottom 5 bits indicate number of bytes in FIFO */
        val = fifo8_num_used(&s->fifo);
        break;
    default:
        val = s->rregs[saddr];
        break;
    }

    trace_esp_mem_readb(saddr, val);
    return val;
}

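/*
 * Writes are recorded in wregs; a subset of registers is also mirrored into
 * rregs so that subsequent reads observe the written value. Writing any part
 * of the transfer counter clears STAT_TC.
 */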
void esp_reg_write(ESPState *s, uint32_t saddr, uint64_t val)
{
    trace_esp_mem_writeb(saddr, s->wregs[saddr], val);
    switch (saddr) {
    case ESP_TCHI:
        s->tchi_written = true;
        /* fall through */
    case ESP_TCLO:
    case ESP_TCMID:
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        break;
    case ESP_FIFO:
        if (s->do_cmd) {
            esp_fifo_push(&s->cmdfifo, val);

            /*
             * If any unexpected message out/command phase data is
             * transferred using non-DMA, raise the interrupt
             */
            if (s->rregs[ESP_CMD] == CMD_TI) {
                s->rregs[ESP_RINTR] |= INTR_BS;
                esp_raise_irq(s);
            }
        } else {
            esp_fifo_push(&s->fifo, val);
        }
        break;
    case ESP_CMD:
        s->rregs[saddr] = val;
        if (val & CMD_DMA) {
            s->dma = 1;
            /* Reload DMA counter. */
            if (esp_get_stc(s) == 0) {
                esp_set_tc(s, 0x10000);
            } else {
                esp_set_tc(s, esp_get_stc(s));
            }
        } else {
            s->dma = 0;
        }
        switch (val & CMD_CMD) {
        case CMD_NOP:
            trace_esp_mem_writeb_cmd_nop(val);
            break;
        case CMD_FLUSH:
            trace_esp_mem_writeb_cmd_flush(val);
            fifo8_reset(&s->fifo);
            break;
        case CMD_RESET:
            trace_esp_mem_writeb_cmd_reset(val);
            esp_soft_reset(s);
            break;
        case CMD_BUSRESET:
            trace_esp_mem_writeb_cmd_bus_reset(val);
            if (!(s->wregs[ESP_CFG1] & CFG1_RESREPT)) {
                s->rregs[ESP_RINTR] |= INTR_RST;
                esp_raise_irq(s);
            }
            break;
        case CMD_TI:
            trace_esp_mem_writeb_cmd_ti(val);
            handle_ti(s);
            break;
        case CMD_ICCS:
            trace_esp_mem_writeb_cmd_iccs(val);
            write_response(s);
            s->rregs[ESP_RINTR] |= INTR_FC;
            s->rregs[ESP_RSTAT] |= STAT_MI;
            break;
        case CMD_MSGACC:
            trace_esp_mem_writeb_cmd_msgacc(val);
            s->rregs[ESP_RINTR] |= INTR_DC;
            s->rregs[ESP_RSEQ] = 0;
            s->rregs[ESP_RFLAGS] = 0;
            esp_raise_irq(s);
            break;
        case CMD_PAD:
            trace_esp_mem_writeb_cmd_pad(val);
            s->rregs[ESP_RSTAT] = STAT_TC;
            s->rregs[ESP_RINTR] |= INTR_FC;
            s->rregs[ESP_RSEQ] = 0;
            break;
        case CMD_SATN:
            trace_esp_mem_writeb_cmd_satn(val);
            break;
        case CMD_RSTATN:
            trace_esp_mem_writeb_cmd_rstatn(val);
            break;
        case CMD_SEL:
            trace_esp_mem_writeb_cmd_sel(val);
            handle_s_without_atn(s);
            break;
        case CMD_SELATN:
            trace_esp_mem_writeb_cmd_selatn(val);
            handle_satn(s);
            break;
        case CMD_SELATNS:
            trace_esp_mem_writeb_cmd_selatns(val);
            handle_satn_stop(s);
            break;
        case CMD_ENSEL:
            trace_esp_mem_writeb_cmd_ensel(val);
            s->rregs[ESP_RINTR] = 0;
            break;
        case CMD_DISSEL:
            trace_esp_mem_writeb_cmd_dissel(val);
            s->rregs[ESP_RINTR] = 0;
            esp_raise_irq(s);
            break;
        default:
            trace_esp_error_unhandled_command(val);
            break;
        }
        break;
    case ESP_WBUSID ... ESP_WSYNO:
        break;
    case ESP_CFG1:
    case ESP_CFG2: case ESP_CFG3:
    case ESP_RES3: case ESP_RES4:
        s->rregs[saddr] = val;
        break;
    case ESP_WCCF ... ESP_WTEST:
        break;
    default:
        trace_esp_error_invalid_write(val, saddr);
        return;
    }
    s->wregs[saddr] = val;
}

static bool esp_mem_accepts(void *opaque, hwaddr addr,
                            unsigned size, bool is_write,
                            MemTxAttrs attrs)
{
    return (size == 1) || (is_write && size == 4);
}

static bool esp_is_before_version_5(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);

    version_id = MIN(version_id, s->mig_version_id);
    return version_id < 5;
}

static bool esp_is_version_5(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);

    version_id = MIN(version_id, s->mig_version_id);
    return version_id >= 5;
}

static bool esp_is_version_6(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);

    version_id = MIN(version_id, s->mig_version_id);
    return version_id >= 6;
}

int esp_pre_save(void *opaque)
{
    ESPState *s = ESP(object_resolve_path_component(
                      OBJECT(opaque), "esp"));

    s->mig_version_id = vmstate_esp.version_id;
    return 0;
}

static int esp_post_load(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);
    int len, i;

    version_id = MIN(version_id, s->mig_version_id);

    if (version_id < 5) {
        esp_set_tc(s, s->mig_dma_left);

        /* Migrate ti_buf to fifo */
        len = s->mig_ti_wptr - s->mig_ti_rptr;
        for (i = 0; i < len; i++) {
            fifo8_push(&s->fifo, s->mig_ti_buf[i]);
        }

        /* Migrate cmdbuf to cmdfifo */
        for (i = 0; i < s->mig_cmdlen; i++) {
            fifo8_push(&s->cmdfifo, s->mig_cmdbuf[i]);
        }
    }

    s->mig_version_id = vmstate_esp.version_id;
    return 0;
}

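/*
 * Migration stream layout: version 5 replaced the raw ti_buf/cmdbuf arrays
 * with Fifo8 state, so the mig_* fields only exist to accept streams from
 * older versions (gated by the esp_is_* tests above) and are converted in
 * esp_post_load().
 */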
const VMStateDescription vmstate_esp = {
    .name = "esp",
    .version_id = 6,
    .minimum_version_id = 3,
    .post_load = esp_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_BUFFER(rregs, ESPState),
        VMSTATE_BUFFER(wregs, ESPState),
        VMSTATE_INT32(ti_size, ESPState),
        VMSTATE_UINT32_TEST(mig_ti_rptr, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32_TEST(mig_ti_wptr, ESPState, esp_is_before_version_5),
        VMSTATE_BUFFER_TEST(mig_ti_buf, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32(status, ESPState),
        VMSTATE_UINT32_TEST(mig_deferred_status, ESPState,
                            esp_is_before_version_5),
        VMSTATE_BOOL_TEST(mig_deferred_complete, ESPState,
                          esp_is_before_version_5),
        VMSTATE_UINT32(dma, ESPState),
        VMSTATE_STATIC_BUFFER(mig_cmdbuf, ESPState, 0,
                              esp_is_before_version_5, 0, 16),
        VMSTATE_STATIC_BUFFER(mig_cmdbuf, ESPState, 4,
                              esp_is_before_version_5, 16,
                              sizeof(typeof_field(ESPState, mig_cmdbuf))),
        VMSTATE_UINT32_TEST(mig_cmdlen, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32(do_cmd, ESPState),
        VMSTATE_UINT32_TEST(mig_dma_left, ESPState, esp_is_before_version_5),
        VMSTATE_BOOL_TEST(data_in_ready, ESPState, esp_is_version_5),
        VMSTATE_UINT8_TEST(cmdfifo_cdb_offset, ESPState, esp_is_version_5),
        VMSTATE_FIFO8_TEST(fifo, ESPState, esp_is_version_5),
        VMSTATE_FIFO8_TEST(cmdfifo, ESPState, esp_is_version_5),
        VMSTATE_UINT8_TEST(ti_cmd, ESPState, esp_is_version_5),
        VMSTATE_UINT8_TEST(lun, ESPState, esp_is_version_6),
        VMSTATE_END_OF_LIST()
    },
};

static void sysbus_esp_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);
    uint32_t saddr;

    saddr = addr >> sysbus->it_shift;
    esp_reg_write(s, saddr, val);
}

static uint64_t sysbus_esp_mem_read(void *opaque, hwaddr addr,
                                    unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);
    uint32_t saddr;

    saddr = addr >> sysbus->it_shift;
    return esp_reg_read(s, saddr);
}

static const MemoryRegionOps sysbus_esp_mem_ops = {
    .read = sysbus_esp_mem_read,
    .write = sysbus_esp_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.accepts = esp_mem_accepts,
};

static void sysbus_esp_pdma_write(void *opaque, hwaddr addr,
                                  uint64_t val, unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);

    trace_esp_pdma_write(size);

    switch (size) {
    case 1:
        esp_pdma_write(s, val);
        break;
    case 2:
        esp_pdma_write(s, val >> 8);
        esp_pdma_write(s, val);
        break;
    }
    s->pdma_cb(s);
}

static uint64_t sysbus_esp_pdma_read(void *opaque, hwaddr addr,
                                     unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);
    uint64_t val = 0;

    trace_esp_pdma_read(size);

    switch (size) {
    case 1:
        val = esp_pdma_read(s);
        break;
    case 2:
        val = esp_pdma_read(s);
        val = (val << 8) | esp_pdma_read(s);
        break;
    }
    if (fifo8_num_used(&s->fifo) < 2) {
        s->pdma_cb(s);
    }
    return val;
}

static const MemoryRegionOps sysbus_esp_pdma_ops = {
    .read = sysbus_esp_pdma_read,
    .write = sysbus_esp_pdma_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.min_access_size = 1,
    .valid.max_access_size = 4,
    .impl.min_access_size = 1,
    .impl.max_access_size = 2,
};

static const struct SCSIBusInfo esp_scsi_info = {
    .tcq = false,
    .max_target = ESP_MAX_DEVS,
    .max_lun = 7,

    .transfer_data = esp_transfer_data,
    .complete = esp_command_complete,
    .cancel = esp_request_cancelled
};

static void sysbus_esp_gpio_demux(void *opaque, int irq, int level)
{
    SysBusESPState *sysbus = SYSBUS_ESP(opaque);
    ESPState *s = ESP(&sysbus->esp);

    switch (irq) {
    case 0:
        parent_esp_reset(s, irq, level);
        break;
    case 1:
        /* Pass the ESPState, not the SysBusESPState wrapper */
        esp_dma_enable(s, irq, level);
        break;
    }
}

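/*
 * The sysbus device exposes two MMIO regions: the register bank, with each
 * register strided by it_shift, and a small PDMA region through which the
 * host CPU performs the pseudo-DMA transfers.
 */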
static void sysbus_esp_realize(DeviceState *dev, Error **errp)
{
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
    SysBusESPState *sysbus = SYSBUS_ESP(dev);
    ESPState *s = ESP(&sysbus->esp);

    if (!qdev_realize(DEVICE(s), NULL, errp)) {
        return;
    }

    sysbus_init_irq(sbd, &s->irq);
    sysbus_init_irq(sbd, &s->irq_data);
    assert(sysbus->it_shift != -1);

    s->chip_id = TCHI_FAS100A;
    memory_region_init_io(&sysbus->iomem, OBJECT(sysbus), &sysbus_esp_mem_ops,
                          sysbus, "esp-regs", ESP_REGS << sysbus->it_shift);
    sysbus_init_mmio(sbd, &sysbus->iomem);
    memory_region_init_io(&sysbus->pdma, OBJECT(sysbus), &sysbus_esp_pdma_ops,
                          sysbus, "esp-pdma", 4);
    sysbus_init_mmio(sbd, &sysbus->pdma);

    qdev_init_gpio_in(dev, sysbus_esp_gpio_demux, 2);

    scsi_bus_init(&s->bus, sizeof(s->bus), dev, &esp_scsi_info);
}

static void sysbus_esp_hard_reset(DeviceState *dev)
{
    SysBusESPState *sysbus = SYSBUS_ESP(dev);
    ESPState *s = ESP(&sysbus->esp);

    esp_hard_reset(s);
}

static void sysbus_esp_init(Object *obj)
{
    SysBusESPState *sysbus = SYSBUS_ESP(obj);

    object_initialize_child(obj, "esp", &sysbus->esp, TYPE_ESP);
}

static const VMStateDescription vmstate_sysbus_esp_scsi = {
    .name = "sysbusespscsi",
    .version_id = 2,
    .minimum_version_id = 1,
    .pre_save = esp_pre_save,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8_V(esp.mig_version_id, SysBusESPState, 2),
        VMSTATE_STRUCT(esp, SysBusESPState, 0, vmstate_esp, ESPState),
        VMSTATE_END_OF_LIST()
    }
};

static void sysbus_esp_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = sysbus_esp_realize;
    dc->reset = sysbus_esp_hard_reset;
    dc->vmsd = &vmstate_sysbus_esp_scsi;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}

static const TypeInfo sysbus_esp_info = {
    .name = TYPE_SYSBUS_ESP,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_init = sysbus_esp_init,
    .instance_size = sizeof(SysBusESPState),
    .class_init = sysbus_esp_class_init,
};

static void esp_finalize(Object *obj)
{
    ESPState *s = ESP(obj);

    fifo8_destroy(&s->fifo);
    fifo8_destroy(&s->cmdfifo);
}

static void esp_init(Object *obj)
{
    ESPState *s = ESP(obj);

    fifo8_create(&s->fifo, ESP_FIFO_SZ);
    fifo8_create(&s->cmdfifo, ESP_CMDFIFO_SZ);
}

static void esp_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    /* internal device for sysbusesp/pciespscsi, not user-creatable */
    dc->user_creatable = false;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}

static const TypeInfo esp_info = {
    .name = TYPE_ESP,
    .parent = TYPE_DEVICE,
    .instance_init = esp_init,
    .instance_finalize = esp_finalize,
    .instance_size = sizeof(ESPState),
    .class_init = esp_class_init,
};

static void esp_register_types(void)
{
    type_register_static(&sysbus_esp_info);
    type_register_static(&esp_info);
}

type_init(esp_register_types)