/*
 * QEMU ESP/NCR53C9x emulation
 *
 * Copyright (c) 2005-2006 Fabrice Bellard
 * Copyright (c) 2012 Herve Poussineau
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "hw/sysbus.h"
#include "migration/vmstate.h"
#include "hw/irq.h"
#include "hw/scsi/esp.h"
#include "trace.h"
#include "qemu/log.h"
#include "qemu/module.h"

/*
 * On Sparc32, this is the ESP (NCR53C90) part of chip STP2000 (Master I/O),
 * also produced as NCR89C100. See
 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR89C100.txt
 * and
 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR53C9X.txt
 *
 * On Macintosh Quadra it is a NCR53C96.
 */
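
/*
 * The STAT_INT bit in ESP_RSTAT mirrors the state of the IRQ line: the two
 * helpers below keep them in sync so that the rest of the code can test the
 * register rather than the qemu_irq itself.
 */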

static void esp_raise_irq(ESPState *s)
{
    if (!(s->rregs[ESP_RSTAT] & STAT_INT)) {
        s->rregs[ESP_RSTAT] |= STAT_INT;
        qemu_irq_raise(s->irq);
        trace_esp_raise_irq();
    }
}

static void esp_lower_irq(ESPState *s)
{
    if (s->rregs[ESP_RSTAT] & STAT_INT) {
        s->rregs[ESP_RSTAT] &= ~STAT_INT;
        qemu_irq_lower(s->irq);
        trace_esp_lower_irq();
    }
}

static void esp_raise_drq(ESPState *s)
{
    qemu_irq_raise(s->irq_data);
    trace_esp_raise_drq();
}

static void esp_lower_drq(ESPState *s)
{
    qemu_irq_lower(s->irq_data);
    trace_esp_lower_drq();
}

void esp_dma_enable(ESPState *s, int irq, int level)
{
    if (level) {
        s->dma_enabled = 1;
        trace_esp_dma_enable();
        if (s->dma_cb) {
            s->dma_cb(s);
            s->dma_cb = NULL;
        }
    } else {
        trace_esp_dma_disable();
        s->dma_enabled = 0;
    }
}

void esp_request_cancelled(SCSIRequest *req)
{
    ESPState *s = req->hba_private;

    if (req == s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
        s->async_len = 0;
    }
}

static void esp_fifo_push(Fifo8 *fifo, uint8_t val)
{
    if (fifo8_num_used(fifo) == fifo->capacity) {
        trace_esp_error_fifo_overrun();
        return;
    }

    fifo8_push(fifo, val);
}

static uint8_t esp_fifo_pop(Fifo8 *fifo)
{
    if (fifo8_is_empty(fifo)) {
        return 0;
    }

    return fifo8_pop(fifo);
}

static uint32_t esp_fifo_pop_buf(Fifo8 *fifo, uint8_t *dest, int maxlen)
{
    const uint8_t *buf;
    uint32_t n;

    if (maxlen == 0) {
        return 0;
    }

    buf = fifo8_pop_buf(fifo, maxlen, &n);
    if (dest) {
        memcpy(dest, buf, n);
    }

    return n;
}

static uint32_t esp_get_tc(ESPState *s)
{
    uint32_t dmalen;

    dmalen = s->rregs[ESP_TCLO];
    dmalen |= s->rregs[ESP_TCMID] << 8;
    dmalen |= s->rregs[ESP_TCHI] << 16;

    return dmalen;
}

static void esp_set_tc(ESPState *s, uint32_t dmalen)
{
    s->rregs[ESP_TCLO] = dmalen;
    s->rregs[ESP_TCMID] = dmalen >> 8;
    s->rregs[ESP_TCHI] = dmalen >> 16;
}

static uint32_t esp_get_stc(ESPState *s)
{
    uint32_t dmalen;

    dmalen = s->wregs[ESP_TCLO];
    dmalen |= s->wregs[ESP_TCMID] << 8;
    dmalen |= s->wregs[ESP_TCHI] << 16;

    return dmalen;
}

static uint8_t esp_pdma_read(ESPState *s)
{
    uint8_t val;

    if (s->do_cmd) {
        val = esp_fifo_pop(&s->cmdfifo);
    } else {
        val = esp_fifo_pop(&s->fifo);
    }

    return val;
}

static void esp_pdma_write(ESPState *s, uint8_t val)
{
    uint32_t dmalen = esp_get_tc(s);

    if (dmalen == 0) {
        return;
    }

    if (s->do_cmd) {
        esp_fifo_push(&s->cmdfifo, val);
    } else {
        esp_fifo_push(&s->fifo, val);
    }

    dmalen--;
    esp_set_tc(s, dmalen);
}

static int esp_select(ESPState *s)
{
    int target;

    target = s->wregs[ESP_WBUSID] & BUSID_DID;

    s->ti_size = 0;
    fifo8_reset(&s->fifo);

    if (s->current_req) {
        /* Started a new command before the old one finished. Cancel it. */
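        /*
         * scsi_req_cancel() calls back into esp_request_cancelled() via the
         * .cancel hook registered in esp_scsi_info, dropping current_req.
         */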
        scsi_req_cancel(s->current_req);
    }

    s->current_dev = scsi_device_find(&s->bus, 0, target, 0);
    if (!s->current_dev) {
        /* No such drive */
        s->rregs[ESP_RSTAT] = 0;
        s->rregs[ESP_RINTR] = INTR_DC;
        s->rregs[ESP_RSEQ] = SEQ_0;
        esp_raise_irq(s);
        return -1;
    }

    /*
     * Note that we deliberately don't raise the IRQ here: this will be done
     * either in do_command_phase() for DATA OUT transfers or by the deferred
     * IRQ mechanism in esp_transfer_data() for DATA IN transfers
     */
    s->rregs[ESP_RINTR] |= INTR_FC;
    s->rregs[ESP_RSEQ] = SEQ_CD;
    return 0;
}

static uint32_t get_cmd(ESPState *s, uint32_t maxlen)
{
    uint8_t buf[ESP_CMDFIFO_SZ];
    uint32_t dmalen, n;
    int target;

    target = s->wregs[ESP_WBUSID] & BUSID_DID;
    if (s->dma) {
        dmalen = MIN(esp_get_tc(s), maxlen);
        if (dmalen == 0) {
            return 0;
        }
        if (s->dma_memory_read) {
            s->dma_memory_read(s->dma_opaque, buf, dmalen);
            dmalen = MIN(fifo8_num_free(&s->cmdfifo), dmalen);
            fifo8_push_all(&s->cmdfifo, buf, dmalen);
        } else {
            if (esp_select(s) < 0) {
                fifo8_reset(&s->cmdfifo);
                return -1;
            }
            esp_raise_drq(s);
            fifo8_reset(&s->cmdfifo);
            return 0;
        }
    } else {
        dmalen = MIN(fifo8_num_used(&s->fifo), maxlen);
        if (dmalen == 0) {
            return 0;
        }
        n = esp_fifo_pop_buf(&s->fifo, buf, dmalen);
        n = MIN(fifo8_num_free(&s->cmdfifo), n);
        fifo8_push_all(&s->cmdfifo, buf, n);
    }
    trace_esp_get_cmd(dmalen, target);

    if (esp_select(s) < 0) {
        fifo8_reset(&s->cmdfifo);
        return -1;
    }
    return dmalen;
}

static void do_command_phase(ESPState *s)
{
    uint32_t cmdlen;
    int32_t datalen;
    SCSIDevice *current_lun;
    uint8_t buf[ESP_CMDFIFO_SZ];

    trace_esp_do_command_phase(s->lun);
    cmdlen = fifo8_num_used(&s->cmdfifo);
    if (!cmdlen || !s->current_dev) {
        return;
    }
    esp_fifo_pop_buf(&s->cmdfifo, buf, cmdlen);

    current_lun = scsi_device_find(&s->bus, 0, s->current_dev->id, s->lun);
    s->current_req = scsi_req_new(current_lun, 0, s->lun, buf, s);
    datalen = scsi_req_enqueue(s->current_req);
    s->ti_size = datalen;
    fifo8_reset(&s->cmdfifo);
    if (datalen != 0) {
        s->rregs[ESP_RSTAT] = STAT_TC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        s->ti_cmd = 0;
        esp_set_tc(s, 0);
        if (datalen > 0) {
            /*
             * Switch to DATA IN phase but wait until initial data xfer is
             * complete before raising the command completion interrupt
             */
            s->data_in_ready = false;
            s->rregs[ESP_RSTAT] |= STAT_DI;
        } else {
            s->rregs[ESP_RSTAT] |= STAT_DO;
            s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
            esp_raise_irq(s);
            esp_lower_drq(s);
        }
        scsi_req_continue(s->current_req);
        return;
    }
}

static void do_message_phase(ESPState *s)
{
    if (s->cmdfifo_cdb_offset) {
        uint8_t message = esp_fifo_pop(&s->cmdfifo);

        trace_esp_do_identify(message);
        s->lun = message & 7;
        s->cmdfifo_cdb_offset--;
    }

    /* Ignore extended messages for now */
    if (s->cmdfifo_cdb_offset) {
        int len = MIN(s->cmdfifo_cdb_offset, fifo8_num_used(&s->cmdfifo));
        esp_fifo_pop_buf(&s->cmdfifo, NULL, len);
        s->cmdfifo_cdb_offset = 0;
    }
}

static void do_cmd(ESPState *s)
{
    do_message_phase(s);
    assert(s->cmdfifo_cdb_offset == 0);
    do_command_phase(s);
}
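
/*
 * The *_pdma_cb callbacks below are invoked from the esp-pdma MMIO handlers
 * once the host CPU has pushed or pulled bytes through the PDMA region; each
 * waits for the transfer counter to reach zero and for the cmdfifo to hold
 * data before dispatching the accumulated command.
 */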

static void satn_pdma_cb(ESPState *s)
{
    if (!esp_get_tc(s) && !fifo8_is_empty(&s->cmdfifo)) {
        s->cmdfifo_cdb_offset = 1;
        s->do_cmd = 0;
        do_cmd(s);
    }
}

static void handle_satn(ESPState *s)
{
    int32_t cmdlen;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_satn;
        return;
    }
    s->pdma_cb = satn_pdma_cb;
    cmdlen = get_cmd(s, ESP_CMDFIFO_SZ);
    if (cmdlen > 0) {
        s->cmdfifo_cdb_offset = 1;
        s->do_cmd = 0;
        do_cmd(s);
    } else if (cmdlen == 0) {
        s->do_cmd = 1;
        /* Target present, but no cmd yet - switch to command phase */
        s->rregs[ESP_RSEQ] = SEQ_CD;
        s->rregs[ESP_RSTAT] = STAT_CD;
    }
}

static void s_without_satn_pdma_cb(ESPState *s)
{
    if (!esp_get_tc(s) && !fifo8_is_empty(&s->cmdfifo)) {
        s->cmdfifo_cdb_offset = 0;
        s->do_cmd = 0;
        do_cmd(s);
    }
}

static void handle_s_without_atn(ESPState *s)
{
    int32_t cmdlen;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_s_without_atn;
        return;
    }
    s->pdma_cb = s_without_satn_pdma_cb;
    cmdlen = get_cmd(s, ESP_CMDFIFO_SZ);
    if (cmdlen > 0) {
        s->cmdfifo_cdb_offset = 0;
        s->do_cmd = 0;
        do_cmd(s);
    } else if (cmdlen == 0) {
        s->do_cmd = 1;
        /* Target present, but no cmd yet - switch to command phase */
        s->rregs[ESP_RSEQ] = SEQ_CD;
        s->rregs[ESP_RSTAT] = STAT_CD;
    }
}

static void satn_stop_pdma_cb(ESPState *s)
{
    if (!esp_get_tc(s) && !fifo8_is_empty(&s->cmdfifo)) {
        trace_esp_handle_satn_stop(fifo8_num_used(&s->cmdfifo));
        s->do_cmd = 1;
        s->cmdfifo_cdb_offset = 1;
        s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
        s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        esp_raise_irq(s);
    }
}

static void handle_satn_stop(ESPState *s)
{
    int32_t cmdlen;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_satn_stop;
        return;
    }
    s->pdma_cb = satn_stop_pdma_cb;
    cmdlen = get_cmd(s, 1);
    if (cmdlen > 0) {
        trace_esp_handle_satn_stop(fifo8_num_used(&s->cmdfifo));
        s->do_cmd = 1;
        s->cmdfifo_cdb_offset = 1;
        s->rregs[ESP_RSTAT] = STAT_MO;
        s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_MO;
        esp_raise_irq(s);
    } else if (cmdlen == 0) {
        s->do_cmd = 1;
        /* Target present, switch to message out phase */
        s->rregs[ESP_RSEQ] = SEQ_MO;
        s->rregs[ESP_RSTAT] = STAT_MO;
    }
}

static void write_response_pdma_cb(ESPState *s)
{
    s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
    s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
    s->rregs[ESP_RSEQ] = SEQ_CD;
    esp_raise_irq(s);
}

static void write_response(ESPState *s)
{
    uint8_t buf[2];

    trace_esp_write_response(s->status);

    buf[0] = s->status;
    buf[1] = 0;

    if (s->dma) {
        if (s->dma_memory_write) {
            s->dma_memory_write(s->dma_opaque, buf, 2);
            s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
            s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
            s->rregs[ESP_RSEQ] = SEQ_CD;
        } else {
            s->pdma_cb = write_response_pdma_cb;
            esp_raise_drq(s);
            return;
        }
    } else {
        fifo8_reset(&s->fifo);
        fifo8_push_all(&s->fifo, buf, 2);
        s->rregs[ESP_RFLAGS] = 2;
    }
    esp_raise_irq(s);
}

static void esp_dma_done(ESPState *s)
{
    s->rregs[ESP_RSTAT] |= STAT_TC;
    s->rregs[ESP_RINTR] |= INTR_BS;
    s->rregs[ESP_RFLAGS] = 0;
    esp_set_tc(s, 0);
    esp_raise_irq(s);
}
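
/*
 * do_dma_pdma_cb() is the common PDMA continuation for TI commands: for
 * DATA OUT it drains the FIFO into the current SCSI request, for DATA IN it
 * refills the FIFO from the request buffer, and it signals DMA completion
 * once the transfer counter is exhausted.
 */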

static void do_dma_pdma_cb(ESPState *s)
{
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
    int len;
    uint32_t n;

    if (s->do_cmd) {
        /* Ensure we have received complete command after SATN and stop */
        if (esp_get_tc(s) || fifo8_is_empty(&s->cmdfifo)) {
            return;
        }

        s->ti_size = 0;
        if ((s->rregs[ESP_RSTAT] & 7) == STAT_CD) {
            /* No command received */
            if (s->cmdfifo_cdb_offset == fifo8_num_used(&s->cmdfifo)) {
                return;
            }

            /* Command has been received */
            s->do_cmd = 0;
            do_cmd(s);
        } else {
            /*
             * Extra message out bytes received: update cmdfifo_cdb_offset
             * and then switch to command phase
             */
            s->cmdfifo_cdb_offset = fifo8_num_used(&s->cmdfifo);
            s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
            s->rregs[ESP_RSEQ] = SEQ_CD;
            s->rregs[ESP_RINTR] |= INTR_BS;
            esp_raise_irq(s);
        }
        return;
    }

    if (!s->current_req) {
        return;
    }

    if (to_device) {
        /* Copy FIFO data to device */
        len = MIN(s->async_len, ESP_FIFO_SZ);
        len = MIN(len, fifo8_num_used(&s->fifo));
        n = esp_fifo_pop_buf(&s->fifo, s->async_buf, len);
        s->async_buf += n;
        s->async_len -= n;
        s->ti_size += n;

        if (n < len) {
            /* Unaligned accesses can cause FIFO wraparound */
            len = len - n;
            n = esp_fifo_pop_buf(&s->fifo, s->async_buf, len);
            s->async_buf += n;
            s->async_len -= n;
            s->ti_size += n;
        }

        if (s->async_len == 0) {
            scsi_req_continue(s->current_req);
            return;
        }

        if (esp_get_tc(s) == 0) {
            esp_lower_drq(s);
            esp_dma_done(s);
        }

        return;
    } else {
        if (s->async_len == 0) {
            /* Defer until the scsi layer has completed */
            scsi_req_continue(s->current_req);
            s->data_in_ready = false;
            return;
        }

        if (esp_get_tc(s) != 0) {
            /* Copy device data to FIFO */
            len = MIN(s->async_len, esp_get_tc(s));
            len = MIN(len, fifo8_num_free(&s->fifo));
            fifo8_push_all(&s->fifo, s->async_buf, len);
            s->async_buf += len;
            s->async_len -= len;
            s->ti_size -= len;
            esp_set_tc(s, esp_get_tc(s) - len);

            if (esp_get_tc(s) == 0) {
                /* Indicate transfer to FIFO is complete */
                s->rregs[ESP_RSTAT] |= STAT_TC;
            }
            return;
        }

        /* Partially filled a scsi buffer. Complete immediately. */
        esp_lower_drq(s);
        esp_dma_done(s);
    }
}
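
/*
 * Illustrative example of the 24-bit transfer counter handled by
 * esp_get_tc()/esp_set_tc() above: a TC of 0x012345 is held as TCLO = 0x45,
 * TCMID = 0x23 and TCHI = 0x01. Per esp_reg_write() below, loading a start
 * transfer count (STC) of zero reloads TC with 0x10000.
 */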

static void esp_do_dma(ESPState *s)
{
    uint32_t len, cmdlen;
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
    uint8_t buf[ESP_CMDFIFO_SZ];

    len = esp_get_tc(s);
    if (s->do_cmd) {
        /*
         * handle_ti() case: esp_do_dma() is only entered with do_cmd set
         * from handle_ti(), as esp_transfer_data() asserts !do_cmd
         */
        cmdlen = fifo8_num_used(&s->cmdfifo);
        trace_esp_do_dma(cmdlen, len);
        if (s->dma_memory_read) {
            len = MIN(len, fifo8_num_free(&s->cmdfifo));
            s->dma_memory_read(s->dma_opaque, buf, len);
            fifo8_push_all(&s->cmdfifo, buf, len);
        } else {
            s->pdma_cb = do_dma_pdma_cb;
            esp_raise_drq(s);
            return;
        }
        trace_esp_handle_ti_cmd(cmdlen);
        s->ti_size = 0;
        if ((s->rregs[ESP_RSTAT] & 7) == STAT_CD) {
            /* No command received */
            if (s->cmdfifo_cdb_offset == fifo8_num_used(&s->cmdfifo)) {
                return;
            }

            /* Command has been received */
            s->do_cmd = 0;
            do_cmd(s);
        } else {
            /*
             * Extra message out bytes received: update cmdfifo_cdb_offset
             * and then switch to command phase
             */
            s->cmdfifo_cdb_offset = fifo8_num_used(&s->cmdfifo);
            s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
            s->rregs[ESP_RSEQ] = SEQ_CD;
            s->rregs[ESP_RINTR] |= INTR_BS;
            esp_raise_irq(s);
        }
        return;
    }
    if (!s->current_req) {
        return;
    }
    if (s->async_len == 0) {
        /* Defer until data is available. */
        return;
    }
    if (len > s->async_len) {
        len = s->async_len;
    }
    if (to_device) {
        if (s->dma_memory_read) {
            s->dma_memory_read(s->dma_opaque, s->async_buf, len);
        } else {
            s->pdma_cb = do_dma_pdma_cb;
            esp_raise_drq(s);
            return;
        }
    } else {
        if (s->dma_memory_write) {
            s->dma_memory_write(s->dma_opaque, s->async_buf, len);
        } else {
            /* Adjust TC for any leftover data in the FIFO */
            if (!fifo8_is_empty(&s->fifo)) {
                esp_set_tc(s, esp_get_tc(s) - fifo8_num_used(&s->fifo));
            }

            /* Copy device data to FIFO */
            len = MIN(len, fifo8_num_free(&s->fifo));
            fifo8_push_all(&s->fifo, s->async_buf, len);
            s->async_buf += len;
            s->async_len -= len;
            s->ti_size -= len;

            /*
             * MacOS toolbox uses a TI length of 16 bytes for all commands, so
             * commands shorter than this must be padded accordingly
             */
            if (len < esp_get_tc(s) && esp_get_tc(s) <= ESP_FIFO_SZ) {
                while (fifo8_num_used(&s->fifo) < ESP_FIFO_SZ) {
                    esp_fifo_push(&s->fifo, 0);
                    len++;
                }
            }

            esp_set_tc(s, esp_get_tc(s) - len);
            s->pdma_cb = do_dma_pdma_cb;
            esp_raise_drq(s);

            /* Indicate transfer to FIFO is complete */
            s->rregs[ESP_RSTAT] |= STAT_TC;
            return;
        }
    }
    esp_set_tc(s, esp_get_tc(s) - len);
    s->async_buf += len;
    s->async_len -= len;
    if (to_device) {
        s->ti_size += len;
    } else {
        s->ti_size -= len;
    }
    if (s->async_len == 0) {
        scsi_req_continue(s->current_req);
        /*
         * If there is still data to be read from the device then
         * complete the DMA operation immediately. Otherwise defer
         * until the scsi layer has completed.
         */
        if (to_device || esp_get_tc(s) != 0 || s->ti_size == 0) {
            return;
        }
    }

    /* Partially filled a scsi buffer. Complete immediately. */
    esp_dma_done(s);
    esp_lower_drq(s);
}
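
/*
 * Non-DMA ("programmed I/O") transfers move data through the 16-byte FIFO
 * rather than through a dma_memory_read/write callback: DATA OUT drains the
 * FIFO into the SCSI buffer, while DATA IN stages one byte at a time for
 * the guest to read back via the ESP_FIFO register.
 */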

static void esp_do_nodma(ESPState *s)
{
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
    uint32_t cmdlen;
    int len;

    if (s->do_cmd) {
        cmdlen = fifo8_num_used(&s->cmdfifo);
        trace_esp_handle_ti_cmd(cmdlen);
        s->ti_size = 0;
        if ((s->rregs[ESP_RSTAT] & 7) == STAT_CD) {
            /* No command received */
            if (s->cmdfifo_cdb_offset == fifo8_num_used(&s->cmdfifo)) {
                return;
            }

            /* Command has been received */
            s->do_cmd = 0;
            do_cmd(s);
        } else {
            /*
             * Extra message out bytes received: update cmdfifo_cdb_offset
             * and then switch to command phase
             */
            s->cmdfifo_cdb_offset = fifo8_num_used(&s->cmdfifo);
            s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
            s->rregs[ESP_RSEQ] = SEQ_CD;
            s->rregs[ESP_RINTR] |= INTR_BS;
            esp_raise_irq(s);
        }
        return;
    }

    if (!s->current_req) {
        return;
    }

    if (s->async_len == 0) {
        /* Defer until data is available. */
        return;
    }

    if (to_device) {
        len = MIN(fifo8_num_used(&s->fifo), ESP_FIFO_SZ);
        esp_fifo_pop_buf(&s->fifo, s->async_buf, len);
        s->async_buf += len;
        s->async_len -= len;
        s->ti_size += len;
    } else {
        if (fifo8_is_empty(&s->fifo)) {
            fifo8_push(&s->fifo, s->async_buf[0]);
            s->async_buf++;
            s->async_len--;
            s->ti_size--;
        }
    }

    if (s->async_len == 0) {
        scsi_req_continue(s->current_req);
        return;
    }

    s->rregs[ESP_RINTR] |= INTR_BS;
    esp_raise_irq(s);
}

void esp_command_complete(SCSIRequest *req, size_t resid)
{
    ESPState *s = req->hba_private;
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);

    trace_esp_command_complete();

    /*
     * Non-DMA transfers from the target will leave the last byte in
     * the FIFO so don't reset ti_size in this case
     */
    if (s->dma || to_device) {
        if (s->ti_size != 0) {
            trace_esp_command_complete_unexpected();
        }
        s->ti_size = 0;
    }

    s->async_len = 0;
    if (req->status) {
        trace_esp_command_complete_fail();
    }
    s->status = req->status;

    /*
     * If the transfer is finished, switch to status phase. For non-DMA
     * transfers from the target the last byte is still in the FIFO
     */
    if (s->ti_size == 0) {
        s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
        esp_dma_done(s);
        esp_lower_drq(s);
    }

    if (s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
    }
}

void esp_transfer_data(SCSIRequest *req, uint32_t len)
{
    ESPState *s = req->hba_private;
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
    uint32_t dmalen = esp_get_tc(s);

    assert(!s->do_cmd);
    trace_esp_transfer_data(dmalen, s->ti_size);
    s->async_len = len;
    s->async_buf = scsi_req_get_buf(req);

    if (!to_device && !s->data_in_ready) {
        /*
         * Initial incoming data xfer is complete so raise command
         * completion interrupt
         */
        s->data_in_ready = true;
        s->rregs[ESP_RSTAT] |= STAT_TC;
        s->rregs[ESP_RINTR] |= INTR_BS;
        esp_raise_irq(s);
    }

    if (s->ti_cmd == 0) {
        /*
         * Always perform the initial transfer upon reception of the next TI
         * command to ensure the DMA/non-DMA status of the command is correct.
         * It is not possible to use s->dma directly in the section below as
         * some OSs send non-DMA NOP commands after a DMA transfer. Hence if
         * the async data transfer is delayed then s->dma is set incorrectly.
         */
        return;
    }
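
    /*
     * Resume the transfer using the DMA/non-DMA mode latched into ti_cmd
     * by handle_ti()
     */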

    if (s->ti_cmd == (CMD_TI | CMD_DMA)) {
        if (dmalen) {
            esp_do_dma(s);
        } else if (s->ti_size <= 0) {
            /*
             * If this was the last part of a DMA transfer then the
             * completion interrupt is deferred to here.
             */
            esp_dma_done(s);
            esp_lower_drq(s);
        }
    } else if (s->ti_cmd == CMD_TI) {
        esp_do_nodma(s);
    }
}

static void handle_ti(ESPState *s)
{
    uint32_t dmalen;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_ti;
        return;
    }

    s->ti_cmd = s->rregs[ESP_CMD];
    if (s->dma) {
        dmalen = esp_get_tc(s);
        trace_esp_handle_ti(dmalen);
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        esp_do_dma(s);
    } else {
        trace_esp_handle_ti(s->ti_size);
        esp_do_nodma(s);
    }
}

void esp_hard_reset(ESPState *s)
{
    memset(s->rregs, 0, ESP_REGS);
    memset(s->wregs, 0, ESP_REGS);
    s->tchi_written = 0;
    s->ti_size = 0;
    fifo8_reset(&s->fifo);
    fifo8_reset(&s->cmdfifo);
    s->dma = 0;
    s->do_cmd = 0;
    s->dma_cb = NULL;

    s->rregs[ESP_CFG1] = 7;
}

static void esp_soft_reset(ESPState *s)
{
    qemu_irq_lower(s->irq);
    qemu_irq_lower(s->irq_data);
    esp_hard_reset(s);
}

static void parent_esp_reset(ESPState *s, int irq, int level)
{
    if (level) {
        esp_soft_reset(s);
    }
}

uint64_t esp_reg_read(ESPState *s, uint32_t saddr)
{
    uint32_t val;

    switch (saddr) {
    case ESP_FIFO:
        if (s->dma_memory_read && s->dma_memory_write &&
            (s->rregs[ESP_RSTAT] & STAT_PIO_MASK) == 0) {
            /* Data out. */
            qemu_log_mask(LOG_UNIMP, "esp: PIO data read not implemented\n");
            s->rregs[ESP_FIFO] = 0;
        } else {
            if ((s->rregs[ESP_RSTAT] & 0x7) == STAT_DI) {
                if (s->ti_size) {
                    esp_do_nodma(s);
                } else {
                    /*
                     * The last byte of a non-DMA transfer has been read out
                     * of the FIFO so switch to status phase
                     */
                    s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
                }
            }
            s->rregs[ESP_FIFO] = esp_fifo_pop(&s->fifo);
        }
        val = s->rregs[ESP_FIFO];
        break;
    case ESP_RINTR:
        /*
         * Clear sequence step, interrupt register and all status bits
         * except TC
         */
        val = s->rregs[ESP_RINTR];
        s->rregs[ESP_RINTR] = 0;
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        /*
         * According to the datasheet ESP_RSEQ should be cleared, but as the
         * emulation currently defers information transfers to the next TI
         * command leave it for now so that pedantic guests such as the old
         * Linux 2.6 driver see the correct flags before the next SCSI phase
         * transition.
         *
         * s->rregs[ESP_RSEQ] = SEQ_0;
         */
        esp_lower_irq(s);
        break;
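    /*
     * Reading RINTR is therefore read-to-clear: a single read returns the
     * pending interrupt causes and deasserts the IRQ line.
     */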
    case ESP_TCHI:
        /* Return the unique id if the value has never been written */
        if (!s->tchi_written) {
            val = s->chip_id;
        } else {
            val = s->rregs[saddr];
        }
        break;
    case ESP_RFLAGS:
        /* Bottom 5 bits indicate number of bytes in FIFO */
        val = fifo8_num_used(&s->fifo);
        break;
    default:
        val = s->rregs[saddr];
        break;
    }

    trace_esp_mem_readb(saddr, val);
    return val;
}

void esp_reg_write(ESPState *s, uint32_t saddr, uint64_t val)
{
    trace_esp_mem_writeb(saddr, s->wregs[saddr], val);
    switch (saddr) {
    case ESP_TCHI:
        s->tchi_written = true;
        /* fall through */
    case ESP_TCLO:
    case ESP_TCMID:
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        break;
    case ESP_FIFO:
        if (s->do_cmd) {
            esp_fifo_push(&s->cmdfifo, val);

            /*
             * If any unexpected message out/command phase data is
             * transferred using non-DMA, raise the interrupt
             */
            if (s->rregs[ESP_CMD] == CMD_TI) {
                s->rregs[ESP_RINTR] |= INTR_BS;
                esp_raise_irq(s);
            }
        } else {
            esp_fifo_push(&s->fifo, val);
        }
        break;
    case ESP_CMD:
        s->rregs[saddr] = val;
        if (val & CMD_DMA) {
            s->dma = 1;
            /* Reload DMA counter. */
            if (esp_get_stc(s) == 0) {
                esp_set_tc(s, 0x10000);
            } else {
                esp_set_tc(s, esp_get_stc(s));
            }
        } else {
            s->dma = 0;
        }
        switch (val & CMD_CMD) {
        case CMD_NOP:
            trace_esp_mem_writeb_cmd_nop(val);
            break;
        case CMD_FLUSH:
            trace_esp_mem_writeb_cmd_flush(val);
            fifo8_reset(&s->fifo);
            break;
        case CMD_RESET:
            trace_esp_mem_writeb_cmd_reset(val);
            esp_soft_reset(s);
            break;
        case CMD_BUSRESET:
            trace_esp_mem_writeb_cmd_bus_reset(val);
            if (!(s->wregs[ESP_CFG1] & CFG1_RESREPT)) {
                s->rregs[ESP_RINTR] |= INTR_RST;
                esp_raise_irq(s);
            }
            break;
        case CMD_TI:
            trace_esp_mem_writeb_cmd_ti(val);
            handle_ti(s);
            break;
        case CMD_ICCS:
            trace_esp_mem_writeb_cmd_iccs(val);
            write_response(s);
            s->rregs[ESP_RINTR] |= INTR_FC;
            s->rregs[ESP_RSTAT] |= STAT_MI;
            break;
        case CMD_MSGACC:
            trace_esp_mem_writeb_cmd_msgacc(val);
            s->rregs[ESP_RINTR] |= INTR_DC;
            s->rregs[ESP_RSEQ] = 0;
            s->rregs[ESP_RFLAGS] = 0;
            esp_raise_irq(s);
            break;
        case CMD_PAD:
            trace_esp_mem_writeb_cmd_pad(val);
            s->rregs[ESP_RSTAT] = STAT_TC;
            s->rregs[ESP_RINTR] |= INTR_FC;
            s->rregs[ESP_RSEQ] = 0;
            break;
        case CMD_SATN:
            trace_esp_mem_writeb_cmd_satn(val);
            break;
        case CMD_RSTATN:
            trace_esp_mem_writeb_cmd_rstatn(val);
            break;
        case CMD_SEL:
            trace_esp_mem_writeb_cmd_sel(val);
            handle_s_without_atn(s);
            break;
        case CMD_SELATN:
            trace_esp_mem_writeb_cmd_selatn(val);
            handle_satn(s);
            break;
        case CMD_SELATNS:
            trace_esp_mem_writeb_cmd_selatns(val);
            handle_satn_stop(s);
            break;
        case CMD_ENSEL:
            trace_esp_mem_writeb_cmd_ensel(val);
            s->rregs[ESP_RINTR] = 0;
            break;
        case CMD_DISSEL:
            trace_esp_mem_writeb_cmd_dissel(val);
            s->rregs[ESP_RINTR] = 0;
            esp_raise_irq(s);
            break;
        default:
            trace_esp_error_unhandled_command(val);
            break;
        }
        break;
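    /*
     * All accepted writes are latched into wregs at the bottom of this
     * function; only the CFG and reserved registers below are additionally
     * mirrored into rregs so that the guest can read them back.
     */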
    case ESP_WBUSID ... ESP_WSYNO:
        break;
    case ESP_CFG1:
    case ESP_CFG2: case ESP_CFG3:
    case ESP_RES3: case ESP_RES4:
        s->rregs[saddr] = val;
        break;
    case ESP_WCCF ... ESP_WTEST:
        break;
    default:
        trace_esp_error_invalid_write(val, saddr);
        return;
    }
    s->wregs[saddr] = val;
}

static bool esp_mem_accepts(void *opaque, hwaddr addr,
                            unsigned size, bool is_write,
                            MemTxAttrs attrs)
{
    return (size == 1) || (is_write && size == 4);
}

static bool esp_is_before_version_5(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);

    version_id = MIN(version_id, s->mig_version_id);
    return version_id < 5;
}

static bool esp_is_version_5(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);

    version_id = MIN(version_id, s->mig_version_id);
    return version_id >= 5;
}

static bool esp_is_version_6(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);

    version_id = MIN(version_id, s->mig_version_id);
    return version_id >= 6;
}

int esp_pre_save(void *opaque)
{
    ESPState *s = ESP(object_resolve_path_component(
                          OBJECT(opaque), "esp"));

    s->mig_version_id = vmstate_esp.version_id;
    return 0;
}

static int esp_post_load(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);
    int len, i;

    version_id = MIN(version_id, s->mig_version_id);

    if (version_id < 5) {
        esp_set_tc(s, s->mig_dma_left);

        /* Migrate ti_buf to fifo */
        len = s->mig_ti_wptr - s->mig_ti_rptr;
        for (i = 0; i < len; i++) {
            fifo8_push(&s->fifo, s->mig_ti_buf[i]);
        }

        /* Migrate cmdbuf to cmdfifo */
        for (i = 0; i < s->mig_cmdlen; i++) {
            fifo8_push(&s->cmdfifo, s->mig_cmdbuf[i]);
        }
    }

    s->mig_version_id = vmstate_esp.version_id;
    return 0;
}

const VMStateDescription vmstate_esp = {
    .name = "esp",
    .version_id = 6,
    .minimum_version_id = 3,
    .post_load = esp_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_BUFFER(rregs, ESPState),
        VMSTATE_BUFFER(wregs, ESPState),
        VMSTATE_INT32(ti_size, ESPState),
        VMSTATE_UINT32_TEST(mig_ti_rptr, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32_TEST(mig_ti_wptr, ESPState, esp_is_before_version_5),
        VMSTATE_BUFFER_TEST(mig_ti_buf, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32(status, ESPState),
        VMSTATE_UINT32_TEST(mig_deferred_status, ESPState,
                            esp_is_before_version_5),
        VMSTATE_BOOL_TEST(mig_deferred_complete, ESPState,
                          esp_is_before_version_5),
        VMSTATE_UINT32(dma, ESPState),
        VMSTATE_STATIC_BUFFER(mig_cmdbuf, ESPState, 0,
                              esp_is_before_version_5, 0, 16),
        VMSTATE_STATIC_BUFFER(mig_cmdbuf, ESPState, 4,
                              esp_is_before_version_5, 16,
                              sizeof(typeof_field(ESPState, mig_cmdbuf))),
        VMSTATE_UINT32_TEST(mig_cmdlen, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32(do_cmd, ESPState),
        VMSTATE_UINT32_TEST(mig_dma_left, ESPState, esp_is_before_version_5),
        VMSTATE_BOOL_TEST(data_in_ready, ESPState, esp_is_version_5),
        VMSTATE_UINT8_TEST(cmdfifo_cdb_offset, ESPState, esp_is_version_5),
        VMSTATE_FIFO8_TEST(fifo, ESPState, esp_is_version_5),
        VMSTATE_FIFO8_TEST(cmdfifo, ESPState, esp_is_version_5),
        VMSTATE_UINT8_TEST(ti_cmd, ESPState, esp_is_version_5),
        VMSTATE_UINT8_TEST(lun, ESPState, esp_is_version_6),
        VMSTATE_END_OF_LIST()
    },
};
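
/*
 * Pre-version-5 migration streams carried the raw mig_* buffers and
 * pointers; the esp_is_* tests above select which fields travel, and
 * esp_post_load() rebuilds the Fifo8-based state from them on the
 * destination.
 */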

static void sysbus_esp_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);
    uint32_t saddr;

    saddr = addr >> sysbus->it_shift;
    esp_reg_write(s, saddr, val);
}

static uint64_t sysbus_esp_mem_read(void *opaque, hwaddr addr,
                                    unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);
    uint32_t saddr;

    saddr = addr >> sysbus->it_shift;
    return esp_reg_read(s, saddr);
}

static const MemoryRegionOps sysbus_esp_mem_ops = {
    .read = sysbus_esp_mem_read,
    .write = sysbus_esp_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.accepts = esp_mem_accepts,
};

static void sysbus_esp_pdma_write(void *opaque, hwaddr addr,
                                  uint64_t val, unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);

    trace_esp_pdma_write(size);

    switch (size) {
    case 1:
        esp_pdma_write(s, val);
        break;
    case 2:
        esp_pdma_write(s, val >> 8);
        esp_pdma_write(s, val);
        break;
    }
    s->pdma_cb(s);
}

static uint64_t sysbus_esp_pdma_read(void *opaque, hwaddr addr,
                                     unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);
    uint64_t val = 0;

    trace_esp_pdma_read(size);

    switch (size) {
    case 1:
        val = esp_pdma_read(s);
        break;
    case 2:
        val = esp_pdma_read(s);
        val = (val << 8) | esp_pdma_read(s);
        break;
    }
    if (fifo8_num_used(&s->fifo) < 2) {
        s->pdma_cb(s);
    }
    return val;
}

static const MemoryRegionOps sysbus_esp_pdma_ops = {
    .read = sysbus_esp_pdma_read,
    .write = sysbus_esp_pdma_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.min_access_size = 1,
    .valid.max_access_size = 4,
    .impl.min_access_size = 1,
    .impl.max_access_size = 2,
};

static const struct SCSIBusInfo esp_scsi_info = {
    .tcq = false,
    .max_target = ESP_MAX_DEVS,
    .max_lun = 7,

    .transfer_data = esp_transfer_data,
    .complete = esp_command_complete,
    .cancel = esp_request_cancelled
};

static void sysbus_esp_gpio_demux(void *opaque, int irq, int level)
{
    SysBusESPState *sysbus = SYSBUS_ESP(opaque);
    ESPState *s = ESP(&sysbus->esp);

    switch (irq) {
    case 0:
        parent_esp_reset(s, irq, level);
        break;
    case 1:
        /* Pass the embedded ESPState, not the SysBusESPState opaque */
        esp_dma_enable(s, irq, level);
        break;
    }
}

static void sysbus_esp_realize(DeviceState *dev, Error **errp)
{
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
    SysBusESPState *sysbus = SYSBUS_ESP(dev);
    ESPState *s = ESP(&sysbus->esp);

    if (!qdev_realize(DEVICE(s), NULL, errp)) {
        return;
    }

    sysbus_init_irq(sbd, &s->irq);
    sysbus_init_irq(sbd, &s->irq_data);
    assert(sysbus->it_shift != -1);

    s->chip_id = TCHI_FAS100A;
    memory_region_init_io(&sysbus->iomem, OBJECT(sysbus), &sysbus_esp_mem_ops,
                          sysbus, "esp-regs", ESP_REGS << sysbus->it_shift);
    sysbus_init_mmio(sbd, &sysbus->iomem);
    memory_region_init_io(&sysbus->pdma, OBJECT(sysbus), &sysbus_esp_pdma_ops,
                          sysbus, "esp-pdma", 4);
    sysbus_init_mmio(sbd, &sysbus->pdma);

    qdev_init_gpio_in(dev, sysbus_esp_gpio_demux, 2);
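
    /*
     * Create the SCSI bus with the callbacks declared in esp_scsi_info:
     * the SCSI layer calls esp_transfer_data()/esp_command_complete() as
     * requests progress and esp_request_cancelled() on cancellation.
     */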
    scsi_bus_new(&s->bus, sizeof(s->bus), dev, &esp_scsi_info, NULL);
}

static void sysbus_esp_hard_reset(DeviceState *dev)
{
    SysBusESPState *sysbus = SYSBUS_ESP(dev);
    ESPState *s = ESP(&sysbus->esp);

    esp_hard_reset(s);
}

static void sysbus_esp_init(Object *obj)
{
    SysBusESPState *sysbus = SYSBUS_ESP(obj);

    object_initialize_child(obj, "esp", &sysbus->esp, TYPE_ESP);
}

static const VMStateDescription vmstate_sysbus_esp_scsi = {
    .name = "sysbusespscsi",
    .version_id = 2,
    .minimum_version_id = 1,
    .pre_save = esp_pre_save,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8_V(esp.mig_version_id, SysBusESPState, 2),
        VMSTATE_STRUCT(esp, SysBusESPState, 0, vmstate_esp, ESPState),
        VMSTATE_END_OF_LIST()
    }
};

static void sysbus_esp_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = sysbus_esp_realize;
    dc->reset = sysbus_esp_hard_reset;
    dc->vmsd = &vmstate_sysbus_esp_scsi;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}

static const TypeInfo sysbus_esp_info = {
    .name = TYPE_SYSBUS_ESP,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_init = sysbus_esp_init,
    .instance_size = sizeof(SysBusESPState),
    .class_init = sysbus_esp_class_init,
};

static void esp_finalize(Object *obj)
{
    ESPState *s = ESP(obj);

    fifo8_destroy(&s->fifo);
    fifo8_destroy(&s->cmdfifo);
}

static void esp_init(Object *obj)
{
    ESPState *s = ESP(obj);

    fifo8_create(&s->fifo, ESP_FIFO_SZ);
    fifo8_create(&s->cmdfifo, ESP_CMDFIFO_SZ);
}

static void esp_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    /* internal device for sysbusesp/pciespscsi, not user-creatable */
    dc->user_creatable = false;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}

static const TypeInfo esp_info = {
    .name = TYPE_ESP,
    .parent = TYPE_DEVICE,
    .instance_init = esp_init,
    .instance_finalize = esp_finalize,
    .instance_size = sizeof(ESPState),
    .class_init = esp_class_init,
};

static void esp_register_types(void)
{
    type_register_static(&sysbus_esp_info);
    type_register_static(&esp_info);
}

type_init(esp_register_types)