/*
 * QEMU ESP/NCR53C9x emulation
 *
 * Copyright (c) 2005-2006 Fabrice Bellard
 * Copyright (c) 2012 Herve Poussineau
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "hw/sysbus.h"
#include "migration/vmstate.h"
#include "hw/irq.h"
#include "hw/scsi/esp.h"
#include "trace.h"
#include "qemu/log.h"
#include "qemu/module.h"

/*
 * On Sparc32, this is the ESP (NCR53C90) part of chip STP2000 (Master I/O),
 * also produced as NCR89C100. See
 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR89C100.txt
 * and
 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR53C9X.txt
 *
 * On the Macintosh Quadra it is a NCR53C96.
 */

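/*
 * The STAT_INT bit in ESP_RSTAT shadows the state of the IRQ line, so the
 * two helpers below only touch the wire (and emit a trace event) on a real
 * edge and are safe to call repeatedly. The DRQ line used for PDMA has no
 * such latch and is toggled unconditionally.
 */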
static void esp_raise_irq(ESPState *s)
{
    if (!(s->rregs[ESP_RSTAT] & STAT_INT)) {
        s->rregs[ESP_RSTAT] |= STAT_INT;
        qemu_irq_raise(s->irq);
        trace_esp_raise_irq();
    }
}

static void esp_lower_irq(ESPState *s)
{
    if (s->rregs[ESP_RSTAT] & STAT_INT) {
        s->rregs[ESP_RSTAT] &= ~STAT_INT;
        qemu_irq_lower(s->irq);
        trace_esp_lower_irq();
    }
}

static void esp_raise_drq(ESPState *s)
{
    qemu_irq_raise(s->irq_data);
    trace_esp_raise_drq();
}

static void esp_lower_drq(ESPState *s)
{
    qemu_irq_lower(s->irq_data);
    trace_esp_lower_drq();
}

void esp_dma_enable(ESPState *s, int irq, int level)
{
    if (level) {
        s->dma_enabled = 1;
        trace_esp_dma_enable();
        if (s->dma_cb) {
            s->dma_cb(s);
            s->dma_cb = NULL;
        }
    } else {
        trace_esp_dma_disable();
        s->dma_enabled = 0;
    }
}

void esp_request_cancelled(SCSIRequest *req)
{
    ESPState *s = req->hba_private;

    if (req == s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
    }
}

static void esp_fifo_push(ESPState *s, uint8_t val)
{
    if (fifo8_num_used(&s->fifo) == ESP_FIFO_SZ) {
        trace_esp_error_fifo_overrun();
        return;
    }

    fifo8_push(&s->fifo, val);
}

static uint8_t esp_fifo_pop(ESPState *s)
{
    if (fifo8_is_empty(&s->fifo)) {
        return 0;
    }

    return fifo8_pop(&s->fifo);
}

static void esp_cmdfifo_push(ESPState *s, uint8_t val)
{
    if (fifo8_num_used(&s->cmdfifo) == ESP_CMDFIFO_SZ) {
        trace_esp_error_fifo_overrun();
        return;
    }

    fifo8_push(&s->cmdfifo, val);
}

static uint8_t esp_cmdfifo_pop(ESPState *s)
{
    if (fifo8_is_empty(&s->cmdfifo)) {
        return 0;
    }

    return fifo8_pop(&s->cmdfifo);
}

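/*
 * The transfer counter (TC) is a 24-bit value spread across three 8-bit
 * registers: TCLO holds bits 0-7, TCMID bits 8-15 and TCHI bits 16-23, so
 * e.g. a count of 0x012345 appears as TCHI=0x01, TCMID=0x23, TCLO=0x45.
 * The guest programs the start count (STC) via the write registers; the
 * read registers hold the current count, which drops as data moves.
 */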
static uint32_t esp_get_tc(ESPState *s)
{
    uint32_t dmalen;

    dmalen = s->rregs[ESP_TCLO];
    dmalen |= s->rregs[ESP_TCMID] << 8;
    dmalen |= s->rregs[ESP_TCHI] << 16;

    return dmalen;
}

static void esp_set_tc(ESPState *s, uint32_t dmalen)
{
    s->rregs[ESP_TCLO] = dmalen;
    s->rregs[ESP_TCMID] = dmalen >> 8;
    s->rregs[ESP_TCHI] = dmalen >> 16;
}

static uint32_t esp_get_stc(ESPState *s)
{
    uint32_t dmalen;

    dmalen = s->wregs[ESP_TCLO];
    dmalen |= s->wregs[ESP_TCMID] << 8;
    dmalen |= s->wregs[ESP_TCHI] << 16;

    return dmalen;
}

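/*
 * PDMA ("pseudo-DMA") helpers: when no dma_memory_read/write callbacks are
 * provided, the host CPU moves the bytes itself through the separate pdma
 * memory region (see sysbus_esp_pdma_ops below). Each byte is routed to
 * the command FIFO while a command is being accumulated (do_cmd set) and
 * to the data FIFO otherwise.
 */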
static uint8_t esp_pdma_read(ESPState *s)
{
    uint8_t val;

    if (s->do_cmd) {
        val = esp_cmdfifo_pop(s);
    } else {
        val = esp_fifo_pop(s);
    }

    return val;
}

static void esp_pdma_write(ESPState *s, uint8_t val)
{
    uint32_t dmalen = esp_get_tc(s);

    if (dmalen == 0) {
        return;
    }

    if (s->do_cmd) {
        esp_cmdfifo_push(s, val);
    } else {
        esp_fifo_push(s, val);
    }

    dmalen--;
    esp_set_tc(s, dmalen);
}

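/*
 * Begin selection of the target encoded in ESP_WBUSID. Returns 0 on
 * success, or -1 (after raising a disconnect interrupt) if no device
 * exists at that target ID.
 */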
static int esp_select(ESPState *s)
{
    int target;

    target = s->wregs[ESP_WBUSID] & BUSID_DID;

    s->ti_size = 0;
    fifo8_reset(&s->fifo);

    if (s->current_req) {
        /* Started a new command before the old one finished. Cancel it. */
        scsi_req_cancel(s->current_req);
        s->async_len = 0;
    }

    s->current_dev = scsi_device_find(&s->bus, 0, target, 0);
    if (!s->current_dev) {
        /* No such drive */
        s->rregs[ESP_RSTAT] = 0;
        s->rregs[ESP_RINTR] |= INTR_DC;
        s->rregs[ESP_RSEQ] = SEQ_0;
        esp_raise_irq(s);
        return -1;
    }

    /*
     * Note that we deliberately don't raise the IRQ here: this will be done
     * either in do_busid_cmd() for DATA OUT transfers or by the deferred
     * IRQ mechanism in esp_transfer_data() for DATA IN transfers
     */
    s->rregs[ESP_RINTR] |= INTR_FC;
    s->rregs[ESP_RSEQ] = SEQ_CD;
    return 0;
}

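/*
 * Gather up to maxlen command bytes into cmdfifo, either via DMA or from
 * the data FIFO. Returns the number of bytes gathered, 0 if the target is
 * present but the bytes must arrive later (e.g. via the PDMA callback),
 * or -1 if selection failed.
 */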
static uint32_t get_cmd(ESPState *s, uint32_t maxlen)
{
    uint8_t buf[ESP_CMDFIFO_SZ];
    uint32_t dmalen, n;
    int target;

    target = s->wregs[ESP_WBUSID] & BUSID_DID;
    if (s->dma) {
        dmalen = MIN(esp_get_tc(s), maxlen);
        if (dmalen == 0) {
            return 0;
        }
        if (s->dma_memory_read) {
            s->dma_memory_read(s->dma_opaque, buf, dmalen);
            fifo8_push_all(&s->cmdfifo, buf, dmalen);
        } else {
            if (esp_select(s) < 0) {
                fifo8_reset(&s->cmdfifo);
                return -1;
            }
            esp_raise_drq(s);
            fifo8_reset(&s->cmdfifo);
            return 0;
        }
    } else {
        dmalen = MIN(fifo8_num_used(&s->fifo), maxlen);
        if (dmalen == 0) {
            return 0;
        }
        memcpy(buf, fifo8_pop_buf(&s->fifo, dmalen, &n), dmalen);
        if (dmalen >= 3) {
            buf[0] = buf[2] >> 5;
        }
        fifo8_push_all(&s->cmdfifo, buf, dmalen);
    }
    trace_esp_get_cmd(dmalen, target);

    if (esp_select(s) < 0) {
        fifo8_reset(&s->cmdfifo);
        return -1;
    }
    return dmalen;
}

static void do_busid_cmd(ESPState *s, uint8_t busid)
{
    uint32_t n, cmdlen;
    int32_t datalen;
    int lun;
    SCSIDevice *current_lun;
    uint8_t *buf;

    trace_esp_do_busid_cmd(busid);
    lun = busid & 7;
    cmdlen = fifo8_num_used(&s->cmdfifo);
    buf = (uint8_t *)fifo8_pop_buf(&s->cmdfifo, cmdlen, &n);

    current_lun = scsi_device_find(&s->bus, 0, s->current_dev->id, lun);
    s->current_req = scsi_req_new(current_lun, 0, lun, buf, s);
    datalen = scsi_req_enqueue(s->current_req);
    s->ti_size = datalen;
    fifo8_reset(&s->cmdfifo);
    if (datalen != 0) {
        s->rregs[ESP_RSTAT] = STAT_TC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        s->ti_cmd = 0;
        esp_set_tc(s, 0);
        if (datalen > 0) {
            /*
             * Switch to DATA IN phase but wait until initial data xfer is
             * complete before raising the command completion interrupt
             */
            s->data_in_ready = false;
            s->rregs[ESP_RSTAT] |= STAT_DI;
        } else {
            s->rregs[ESP_RSTAT] |= STAT_DO;
            s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
            esp_raise_irq(s);
            esp_lower_drq(s);
        }
        scsi_req_continue(s->current_req);
        return;
    }
}

static void do_cmd(ESPState *s)
{
    uint8_t busid = fifo8_pop(&s->cmdfifo);
    uint32_t n;

    s->cmdfifo_cdb_offset--;

    /* Ignore extended messages for now */
    if (s->cmdfifo_cdb_offset) {
        fifo8_pop_buf(&s->cmdfifo, s->cmdfifo_cdb_offset, &n);
        s->cmdfifo_cdb_offset = 0;
    }

    do_busid_cmd(s, busid);
}

static void satn_pdma_cb(ESPState *s)
{
    s->do_cmd = 0;
    if (!fifo8_is_empty(&s->cmdfifo)) {
        s->cmdfifo_cdb_offset = 1;
        do_cmd(s);
    }
}

static void handle_satn(ESPState *s)
{
    int32_t cmdlen;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_satn;
        return;
    }
    s->pdma_cb = satn_pdma_cb;
    cmdlen = get_cmd(s, ESP_CMDFIFO_SZ);
    if (cmdlen > 0) {
        s->cmdfifo_cdb_offset = 1;
        do_cmd(s);
    } else if (cmdlen == 0) {
        s->do_cmd = 1;
        /* Target present, but no cmd yet - switch to command phase */
        s->rregs[ESP_RSEQ] = SEQ_CD;
        s->rregs[ESP_RSTAT] = STAT_CD;
    }
}

static void s_without_satn_pdma_cb(ESPState *s)
{
    uint32_t len;

    s->do_cmd = 0;
    len = fifo8_num_used(&s->cmdfifo);
    if (len) {
        s->cmdfifo_cdb_offset = 0;
        do_busid_cmd(s, 0);
    }
}

static void handle_s_without_atn(ESPState *s)
{
    int32_t cmdlen;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_s_without_atn;
        return;
    }
    s->pdma_cb = s_without_satn_pdma_cb;
    cmdlen = get_cmd(s, ESP_CMDFIFO_SZ);
    if (cmdlen > 0) {
        s->cmdfifo_cdb_offset = 0;
        do_busid_cmd(s, 0);
    } else if (cmdlen == 0) {
        s->do_cmd = 1;
        /* Target present, but no cmd yet - switch to command phase */
        s->rregs[ESP_RSEQ] = SEQ_CD;
        s->rregs[ESP_RSTAT] = STAT_CD;
    }
}

static void satn_stop_pdma_cb(ESPState *s)
{
    s->do_cmd = 0;
    if (!fifo8_is_empty(&s->cmdfifo)) {
        trace_esp_handle_satn_stop(fifo8_num_used(&s->cmdfifo));
        s->do_cmd = 1;
        s->cmdfifo_cdb_offset = 1;
        s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
        s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        esp_raise_irq(s);
    }
}

static void handle_satn_stop(ESPState *s)
{
    int32_t cmdlen;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_satn_stop;
        return;
    }
    s->pdma_cb = satn_stop_pdma_cb;
    cmdlen = get_cmd(s, 1);
    if (cmdlen > 0) {
        trace_esp_handle_satn_stop(fifo8_num_used(&s->cmdfifo));
        s->do_cmd = 1;
        s->cmdfifo_cdb_offset = 1;
        s->rregs[ESP_RSTAT] = STAT_MO;
        s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_MO;
        esp_raise_irq(s);
    } else if (cmdlen == 0) {
        s->do_cmd = 1;
        /* Target present, switch to message out phase */
        s->rregs[ESP_RSEQ] = SEQ_MO;
        s->rregs[ESP_RSTAT] = STAT_MO;
    }
}

static void write_response_pdma_cb(ESPState *s)
{
    s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
    s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
    s->rregs[ESP_RSEQ] = SEQ_CD;
    esp_raise_irq(s);
}

static void write_response(ESPState *s)
{
    uint32_t n;

    trace_esp_write_response(s->status);

    fifo8_reset(&s->fifo);
    esp_fifo_push(s, s->status);
    esp_fifo_push(s, 0);

    if (s->dma) {
        if (s->dma_memory_write) {
            s->dma_memory_write(s->dma_opaque,
                                (uint8_t *)fifo8_pop_buf(&s->fifo, 2, &n), 2);
            s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
            s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
            s->rregs[ESP_RSEQ] = SEQ_CD;
        } else {
            s->pdma_cb = write_response_pdma_cb;
            esp_raise_drq(s);
            return;
        }
    } else {
        s->ti_size = 2;
        s->rregs[ESP_RFLAGS] = 2;
    }
    esp_raise_irq(s);
}

static void esp_dma_done(ESPState *s)
{
    s->rregs[ESP_RSTAT] |= STAT_TC;
    s->rregs[ESP_RINTR] |= INTR_BS;
    s->rregs[ESP_RSEQ] = 0;
    s->rregs[ESP_RFLAGS] = 0;
    esp_set_tc(s, 0);
    esp_raise_irq(s);
}

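/*
 * Completion callback run from the pdma memory region handlers once the
 * host CPU has pushed or pulled a chunk of bytes: it shuttles data between
 * the FIFO and the current SCSI request buffer and decides whether the
 * transfer has finished or more bytes are still required.
 */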
static void do_dma_pdma_cb(ESPState *s)
{
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
    int len;
    uint32_t n;

    if (s->do_cmd) {
        s->ti_size = 0;
        s->do_cmd = 0;
        do_cmd(s);
        esp_lower_drq(s);
        return;
    }

    if (to_device) {
        /*
         * Copy FIFO data to device. Note that fifo8_pop_buf() returns at
         * most n contiguous bytes, so only n (not len) bytes may be copied.
         */
        len = MIN(s->async_len, ESP_FIFO_SZ);
        len = MIN(len, fifo8_num_used(&s->fifo));
        memcpy(s->async_buf, fifo8_pop_buf(&s->fifo, len, &n), n);
        s->async_buf += n;
        s->async_len -= n;
        s->ti_size += n;

        if (n < len) {
            /* Unaligned accesses can cause FIFO wraparound */
            len = len - n;
            memcpy(s->async_buf, fifo8_pop_buf(&s->fifo, len, &n), n);
            s->async_buf += n;
            s->async_len -= n;
            s->ti_size += n;
        }

        if (s->async_len == 0) {
            scsi_req_continue(s->current_req);
            return;
        }

        if (esp_get_tc(s) == 0) {
            esp_lower_drq(s);
            esp_dma_done(s);
        }

        return;
    } else {
        if (s->async_len == 0) {
            if (s->current_req) {
                /* Defer until the scsi layer has completed */
                scsi_req_continue(s->current_req);
                s->data_in_ready = false;
            }
            return;
        }

        if (esp_get_tc(s) != 0) {
            /* Copy device data to FIFO */
            len = MIN(s->async_len, esp_get_tc(s));
            len = MIN(len, fifo8_num_free(&s->fifo));
            fifo8_push_all(&s->fifo, s->async_buf, len);
            s->async_buf += len;
            s->async_len -= len;
            s->ti_size -= len;
            esp_set_tc(s, esp_get_tc(s) - len);

            if (esp_get_tc(s) == 0) {
                /* Indicate transfer to FIFO is complete */
                s->rregs[ESP_RSTAT] |= STAT_TC;
            }
            return;
        }

        /* Partially filled a scsi buffer. Complete immediately. */
        esp_lower_drq(s);
        esp_dma_done(s);
    }
}

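/*
 * Main DMA path. Depending on state this either streams command bytes
 * into cmdfifo (do_cmd set), moves data between guest memory and the SCSI
 * layer through the dma_memory_read/write callbacks, or falls back to
 * PDMA by raising DRQ and letting the host CPU perform the copy.
 */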
static void esp_do_dma(ESPState *s)
{
    uint32_t len, cmdlen;
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
    uint8_t buf[ESP_CMDFIFO_SZ];

    len = esp_get_tc(s);
    if (s->do_cmd) {
        /*
         * handle_ti() case: do_cmd is set, so the bytes transferred here
         * are command bytes destined for cmdfifo rather than data
         */
        cmdlen = fifo8_num_used(&s->cmdfifo);
        trace_esp_do_dma(cmdlen, len);
        if (s->dma_memory_read) {
            s->dma_memory_read(s->dma_opaque, buf, len);
            fifo8_push_all(&s->cmdfifo, buf, len);
        } else {
            s->pdma_cb = do_dma_pdma_cb;
            esp_raise_drq(s);
            return;
        }
        trace_esp_handle_ti_cmd(cmdlen);
        s->ti_size = 0;
        if ((s->rregs[ESP_RSTAT] & 7) == STAT_CD) {
            /* No command received */
            if (s->cmdfifo_cdb_offset == fifo8_num_used(&s->cmdfifo)) {
                return;
            }

            /* Command has been received */
            s->do_cmd = 0;
            do_cmd(s);
        } else {
            /*
             * Extra message out bytes received: update cmdfifo_cdb_offset
             * and then switch to command phase
             */
            s->cmdfifo_cdb_offset = fifo8_num_used(&s->cmdfifo);
            s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
            s->rregs[ESP_RSEQ] = SEQ_CD;
            s->rregs[ESP_RINTR] |= INTR_BS;
            esp_raise_irq(s);
        }
        return;
    }
    if (s->async_len == 0) {
        /* Defer until data is available. */
        return;
    }
    if (len > s->async_len) {
        len = s->async_len;
    }
    if (to_device) {
        if (s->dma_memory_read) {
            s->dma_memory_read(s->dma_opaque, s->async_buf, len);
        } else {
            s->pdma_cb = do_dma_pdma_cb;
            esp_raise_drq(s);
            return;
        }
    } else {
        if (s->dma_memory_write) {
            s->dma_memory_write(s->dma_opaque, s->async_buf, len);
        } else {
            /* Adjust TC for any leftover data in the FIFO */
            if (!fifo8_is_empty(&s->fifo)) {
                esp_set_tc(s, esp_get_tc(s) - fifo8_num_used(&s->fifo));
            }

            /* Copy device data to FIFO */
            len = MIN(len, fifo8_num_free(&s->fifo));
            fifo8_push_all(&s->fifo, s->async_buf, len);
            s->async_buf += len;
            s->async_len -= len;
            s->ti_size -= len;

            /*
             * MacOS toolbox uses a TI length of 16 bytes for all commands, so
             * commands shorter than this must be padded accordingly
             */
            if (len < esp_get_tc(s) && esp_get_tc(s) <= ESP_FIFO_SZ) {
                while (fifo8_num_used(&s->fifo) < ESP_FIFO_SZ) {
                    esp_fifo_push(s, 0);
                    len++;
                }
            }

            esp_set_tc(s, esp_get_tc(s) - len);
            s->pdma_cb = do_dma_pdma_cb;
            esp_raise_drq(s);

            /* Indicate transfer to FIFO is complete */
            s->rregs[ESP_RSTAT] |= STAT_TC;
            return;
        }
    }
    esp_set_tc(s, esp_get_tc(s) - len);
    s->async_buf += len;
    s->async_len -= len;
    if (to_device) {
        s->ti_size += len;
    } else {
        s->ti_size -= len;
    }
    if (s->async_len == 0) {
        scsi_req_continue(s->current_req);
        /*
         * If there is still data to be read from the device then
         * complete the DMA operation immediately. Otherwise defer
         * until the scsi layer has completed.
         */
        if (to_device || esp_get_tc(s) != 0 || s->ti_size == 0) {
            return;
        }
    }

    /* Partially filled a scsi buffer. Complete immediately. */
    esp_dma_done(s);
    esp_lower_drq(s);
}

static void esp_do_nodma(ESPState *s)
{
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
    uint32_t cmdlen, n;
    int len;

    if (s->do_cmd) {
        cmdlen = fifo8_num_used(&s->cmdfifo);
        trace_esp_handle_ti_cmd(cmdlen);
        s->ti_size = 0;
        if ((s->rregs[ESP_RSTAT] & 7) == STAT_CD) {
            /* No command received */
            if (s->cmdfifo_cdb_offset == fifo8_num_used(&s->cmdfifo)) {
                return;
            }

            /* Command has been received */
            s->do_cmd = 0;
            do_cmd(s);
        } else {
            /*
             * Extra message out bytes received: update cmdfifo_cdb_offset
             * and then switch to command phase
             */
            s->cmdfifo_cdb_offset = fifo8_num_used(&s->cmdfifo);
            s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
            s->rregs[ESP_RSEQ] = SEQ_CD;
            s->rregs[ESP_RINTR] |= INTR_BS;
            esp_raise_irq(s);
        }
        return;
    }

    if (s->async_len == 0) {
        /* Defer until data is available. */
        return;
    }

    if (to_device) {
        len = MIN(fifo8_num_used(&s->fifo), ESP_FIFO_SZ);
        memcpy(s->async_buf, fifo8_pop_buf(&s->fifo, len, &n), len);
        s->async_buf += len;
        s->async_len -= len;
        s->ti_size += len;
    } else {
        len = MIN(s->ti_size, s->async_len);
        len = MIN(len, fifo8_num_free(&s->fifo));
        fifo8_push_all(&s->fifo, s->async_buf, len);
        s->async_buf += len;
        s->async_len -= len;
        s->ti_size -= len;
    }

    if (s->async_len == 0) {
        scsi_req_continue(s->current_req);

        if (to_device || s->ti_size == 0) {
            return;
        }
    }

    s->rregs[ESP_RINTR] |= INTR_BS;
    esp_raise_irq(s);
}

void esp_command_complete(SCSIRequest *req, size_t resid)
{
    ESPState *s = req->hba_private;

    trace_esp_command_complete();
    if (s->ti_size != 0) {
        trace_esp_command_complete_unexpected();
    }
    s->ti_size = 0;
    s->async_len = 0;
    if (req->status) {
        trace_esp_command_complete_fail();
    }
    s->status = req->status;
    s->rregs[ESP_RSTAT] = STAT_ST;
    esp_dma_done(s);
    esp_lower_drq(s);
    if (s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
    }
}

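/*
 * Called back by the SCSI layer when request data is ready or has been
 * consumed. For DATA IN transfers the command completion interrupt is
 * deliberately held back until the first chunk of data has actually been
 * transferred; the data_in_ready flag tracks this.
 */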
void esp_transfer_data(SCSIRequest *req, uint32_t len)
{
    ESPState *s = req->hba_private;
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
    uint32_t dmalen = esp_get_tc(s);

    assert(!s->do_cmd);
    trace_esp_transfer_data(dmalen, s->ti_size);
    s->async_len = len;
    s->async_buf = scsi_req_get_buf(req);

    if (!to_device && !s->data_in_ready) {
        /*
         * Initial incoming data xfer is complete so raise command
         * completion interrupt
         */
        s->data_in_ready = true;
        s->rregs[ESP_RSTAT] |= STAT_TC;
        s->rregs[ESP_RINTR] |= INTR_BS;
        esp_raise_irq(s);

        /*
         * If data is ready to transfer and the TI command has already
         * been executed, start DMA immediately. Otherwise DMA will start
         * when the host sends the TI command
         */
        if (s->ti_size && (s->rregs[ESP_CMD] == (CMD_TI | CMD_DMA))) {
            esp_do_dma(s);
        }
        return;
    }

    if (s->ti_cmd == 0) {
        /*
         * Always perform the initial transfer upon reception of the next TI
         * command to ensure the DMA/non-DMA status of the command is correct.
         * It is not possible to use s->dma directly in the section below as
         * some OSs send non-DMA NOP commands after a DMA transfer. Hence if the
         * async data transfer is delayed then s->dma is set incorrectly.
         */
        return;
    }

    if (s->ti_cmd & CMD_DMA) {
        if (dmalen) {
            esp_do_dma(s);
        } else if (s->ti_size <= 0) {
            /*
             * If this was the last part of a DMA transfer then the
             * completion interrupt is deferred to here.
             */
            esp_dma_done(s);
            esp_lower_drq(s);
        }
    } else {
        esp_do_nodma(s);
    }
}

static void handle_ti(ESPState *s)
{
    uint32_t dmalen;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_ti;
        return;
    }

    s->ti_cmd = s->rregs[ESP_CMD];
    if (s->dma) {
        dmalen = esp_get_tc(s);
        trace_esp_handle_ti(dmalen);
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        esp_do_dma(s);
    } else {
        trace_esp_handle_ti(s->ti_size);
        esp_do_nodma(s);
    }
}

void esp_hard_reset(ESPState *s)
{
    memset(s->rregs, 0, ESP_REGS);
    memset(s->wregs, 0, ESP_REGS);
    s->tchi_written = 0;
    s->ti_size = 0;
    fifo8_reset(&s->fifo);
    fifo8_reset(&s->cmdfifo);
    s->dma = 0;
    s->do_cmd = 0;
    s->dma_cb = NULL;

    s->rregs[ESP_CFG1] = 7;
}

static void esp_soft_reset(ESPState *s)
{
    qemu_irq_lower(s->irq);
    qemu_irq_lower(s->irq_data);
    esp_hard_reset(s);
}

static void parent_esp_reset(ESPState *s, int irq, int level)
{
    if (level) {
        esp_soft_reset(s);
    }
}

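/*
 * Register reads can have side effects: reading ESP_RINTR clears the
 * interrupt, sequence step and all status bits except TC and drops the
 * IRQ line, while reading ESP_FIFO pops a byte from the FIFO.
 */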
uint64_t esp_reg_read(ESPState *s, uint32_t saddr)
{
    uint32_t val;

    switch (saddr) {
    case ESP_FIFO:
        if (s->dma_memory_read && s->dma_memory_write &&
                (s->rregs[ESP_RSTAT] & STAT_PIO_MASK) == 0) {
            /* Data out. */
            qemu_log_mask(LOG_UNIMP, "esp: PIO data read not implemented\n");
            s->rregs[ESP_FIFO] = 0;
        } else {
            s->rregs[ESP_FIFO] = esp_fifo_pop(s);
        }
        val = s->rregs[ESP_FIFO];
        break;
    case ESP_RINTR:
        /*
         * Clear sequence step, interrupt register and all status bits
         * except TC
         */
        val = s->rregs[ESP_RINTR];
        s->rregs[ESP_RINTR] = 0;
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        s->rregs[ESP_RSEQ] = SEQ_0;
        esp_lower_irq(s);
        break;
    case ESP_TCHI:
        /* Return the unique id if the value has never been written */
        if (!s->tchi_written) {
            val = s->chip_id;
        } else {
            val = s->rregs[saddr];
        }
        break;
    case ESP_RFLAGS:
        /* Bottom 5 bits indicate number of bytes in FIFO */
        val = fifo8_num_used(&s->fifo);
        break;
    default:
        val = s->rregs[saddr];
        break;
    }

    trace_esp_mem_readb(saddr, val);
    return val;
}

void esp_reg_write(ESPState *s, uint32_t saddr, uint64_t val)
{
    trace_esp_mem_writeb(saddr, s->wregs[saddr], val);
    switch (saddr) {
    case ESP_TCHI:
        s->tchi_written = true;
        /* fall through */
    case ESP_TCLO:
    case ESP_TCMID:
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        break;
    case ESP_FIFO:
        if (s->do_cmd) {
            esp_cmdfifo_push(s, val);
        } else {
            esp_fifo_push(s, val);
        }

        /* Non-DMA transfers raise an interrupt after every byte */
        if (s->rregs[ESP_CMD] == CMD_TI) {
            s->rregs[ESP_RINTR] |= INTR_FC | INTR_BS;
            esp_raise_irq(s);
        }
        break;
    case ESP_CMD:
        s->rregs[saddr] = val;
        if (val & CMD_DMA) {
            s->dma = 1;
            /* Reload DMA counter. */
            if (esp_get_stc(s) == 0) {
                esp_set_tc(s, 0x10000);
            } else {
                esp_set_tc(s, esp_get_stc(s));
            }
        } else {
            s->dma = 0;
        }
        switch (val & CMD_CMD) {
        case CMD_NOP:
            trace_esp_mem_writeb_cmd_nop(val);
            break;
        case CMD_FLUSH:
            trace_esp_mem_writeb_cmd_flush(val);
            fifo8_reset(&s->fifo);
            break;
        case CMD_RESET:
            trace_esp_mem_writeb_cmd_reset(val);
            esp_soft_reset(s);
            break;
        case CMD_BUSRESET:
            trace_esp_mem_writeb_cmd_bus_reset(val);
            if (!(s->wregs[ESP_CFG1] & CFG1_RESREPT)) {
                s->rregs[ESP_RINTR] |= INTR_RST;
                esp_raise_irq(s);
            }
            break;
        case CMD_TI:
            trace_esp_mem_writeb_cmd_ti(val);
            handle_ti(s);
            break;
        case CMD_ICCS:
            trace_esp_mem_writeb_cmd_iccs(val);
            write_response(s);
            s->rregs[ESP_RINTR] |= INTR_FC;
            s->rregs[ESP_RSTAT] |= STAT_MI;
            break;
        case CMD_MSGACC:
            trace_esp_mem_writeb_cmd_msgacc(val);
            s->rregs[ESP_RINTR] |= INTR_DC;
            s->rregs[ESP_RSEQ] = 0;
            s->rregs[ESP_RFLAGS] = 0;
            esp_raise_irq(s);
            break;
        case CMD_PAD:
            trace_esp_mem_writeb_cmd_pad(val);
            s->rregs[ESP_RSTAT] = STAT_TC;
            s->rregs[ESP_RINTR] |= INTR_FC;
            s->rregs[ESP_RSEQ] = 0;
            break;
        case CMD_SATN:
            trace_esp_mem_writeb_cmd_satn(val);
            break;
        case CMD_RSTATN:
            trace_esp_mem_writeb_cmd_rstatn(val);
            break;
        case CMD_SEL:
            trace_esp_mem_writeb_cmd_sel(val);
            handle_s_without_atn(s);
            break;
        case CMD_SELATN:
            trace_esp_mem_writeb_cmd_selatn(val);
            handle_satn(s);
            break;
        case CMD_SELATNS:
            trace_esp_mem_writeb_cmd_selatns(val);
            handle_satn_stop(s);
            break;
        case CMD_ENSEL:
            trace_esp_mem_writeb_cmd_ensel(val);
            s->rregs[ESP_RINTR] = 0;
            break;
        case CMD_DISSEL:
            trace_esp_mem_writeb_cmd_dissel(val);
            s->rregs[ESP_RINTR] = 0;
            esp_raise_irq(s);
            break;
        default:
            trace_esp_error_unhandled_command(val);
            break;
        }
        break;
    case ESP_WBUSID ... ESP_WSYNO:
        break;
    case ESP_CFG1:
    case ESP_CFG2: case ESP_CFG3:
    case ESP_RES3: case ESP_RES4:
        s->rregs[saddr] = val;
        break;
    case ESP_WCCF ... ESP_WTEST:
        break;
    default:
        trace_esp_error_invalid_write(val, saddr);
        return;
    }
    s->wregs[saddr] = val;
}

static bool esp_mem_accepts(void *opaque, hwaddr addr,
                            unsigned size, bool is_write,
                            MemTxAttrs attrs)
{
    return (size == 1) || (is_write && size == 4);
}

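/*
 * Migration: version 5 of the ESP vmstate replaced the fixed ti_buf and
 * cmdbuf arrays with Fifo8s, so the mig_* fields below exist only to load
 * streams from older versions and convert them in esp_post_load().
 */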
static bool esp_is_before_version_5(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);

    version_id = MIN(version_id, s->mig_version_id);
    return version_id < 5;
}

static bool esp_is_version_5(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);

    version_id = MIN(version_id, s->mig_version_id);
    return version_id == 5;
}

static int esp_pre_save(void *opaque)
{
    ESPState *s = ESP(opaque);

    s->mig_version_id = vmstate_esp.version_id;
    return 0;
}

static int esp_post_load(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);
    int len, i;

    version_id = MIN(version_id, s->mig_version_id);

    if (version_id < 5) {
        esp_set_tc(s, s->mig_dma_left);

        /* Migrate ti_buf to fifo */
        len = s->mig_ti_wptr - s->mig_ti_rptr;
        for (i = 0; i < len; i++) {
            fifo8_push(&s->fifo, s->mig_ti_buf[i]);
        }

        /* Migrate cmdbuf to cmdfifo */
        for (i = 0; i < s->mig_cmdlen; i++) {
            fifo8_push(&s->cmdfifo, s->mig_cmdbuf[i]);
        }
    }

    s->mig_version_id = vmstate_esp.version_id;
    return 0;
}

const VMStateDescription vmstate_esp = {
    .name = "esp",
    .version_id = 5,
    .minimum_version_id = 3,
    .pre_save = esp_pre_save,
    .post_load = esp_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_BUFFER(rregs, ESPState),
        VMSTATE_BUFFER(wregs, ESPState),
        VMSTATE_INT32(ti_size, ESPState),
        VMSTATE_UINT32_TEST(mig_ti_rptr, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32_TEST(mig_ti_wptr, ESPState, esp_is_before_version_5),
        VMSTATE_BUFFER_TEST(mig_ti_buf, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32(status, ESPState),
        VMSTATE_UINT32_TEST(mig_deferred_status, ESPState,
                            esp_is_before_version_5),
        VMSTATE_BOOL_TEST(mig_deferred_complete, ESPState,
                          esp_is_before_version_5),
        VMSTATE_UINT32(dma, ESPState),
        VMSTATE_STATIC_BUFFER(mig_cmdbuf, ESPState, 0,
                              esp_is_before_version_5, 0, 16),
        VMSTATE_STATIC_BUFFER(mig_cmdbuf, ESPState, 4,
                              esp_is_before_version_5, 16,
                              sizeof(typeof_field(ESPState, mig_cmdbuf))),
        VMSTATE_UINT32_TEST(mig_cmdlen, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32(do_cmd, ESPState),
        VMSTATE_UINT32_TEST(mig_dma_left, ESPState, esp_is_before_version_5),
        VMSTATE_BOOL_TEST(data_in_ready, ESPState, esp_is_version_5),
        VMSTATE_UINT8_TEST(cmdfifo_cdb_offset, ESPState, esp_is_version_5),
        VMSTATE_FIFO8_TEST(fifo, ESPState, esp_is_version_5),
        VMSTATE_FIFO8_TEST(cmdfifo, ESPState, esp_is_version_5),
        VMSTATE_UINT8_TEST(ti_cmd, ESPState, esp_is_version_5),
        VMSTATE_END_OF_LIST()
    },
};

static void sysbus_esp_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);
    uint32_t saddr;

    saddr = addr >> sysbus->it_shift;
    esp_reg_write(s, saddr, val);
}

static uint64_t sysbus_esp_mem_read(void *opaque, hwaddr addr,
                                    unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);
    uint32_t saddr;

    saddr = addr >> sysbus->it_shift;
    return esp_reg_read(s, saddr);
}

static const MemoryRegionOps sysbus_esp_mem_ops = {
    .read = sysbus_esp_mem_read,
    .write = sysbus_esp_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.accepts = esp_mem_accepts,
};

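/*
 * The pdma region accepts 8-bit and 16-bit accesses (impl.max_access_size
 * is 2); a 16-bit access moves two bytes, high byte first. The pending
 * pdma_cb completion callback runs once TC reaches zero or the FIFO can
 * no longer supply (or absorb) a further 16-bit access.
 */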
static void sysbus_esp_pdma_write(void *opaque, hwaddr addr,
                                  uint64_t val, unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);
    uint32_t dmalen;

    trace_esp_pdma_write(size);

    switch (size) {
    case 1:
        esp_pdma_write(s, val);
        break;
    case 2:
        esp_pdma_write(s, val >> 8);
        esp_pdma_write(s, val);
        break;
    }
    dmalen = esp_get_tc(s);
    if (dmalen == 0 || fifo8_num_free(&s->fifo) < 2) {
        s->pdma_cb(s);
    }
}

static uint64_t sysbus_esp_pdma_read(void *opaque, hwaddr addr,
                                     unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);
    uint64_t val = 0;

    trace_esp_pdma_read(size);

    switch (size) {
    case 1:
        val = esp_pdma_read(s);
        break;
    case 2:
        val = esp_pdma_read(s);
        val = (val << 8) | esp_pdma_read(s);
        break;
    }
    if (fifo8_num_used(&s->fifo) < 2) {
        s->pdma_cb(s);
    }
    return val;
}

static const MemoryRegionOps sysbus_esp_pdma_ops = {
    .read = sysbus_esp_pdma_read,
    .write = sysbus_esp_pdma_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.min_access_size = 1,
    .valid.max_access_size = 4,
    .impl.min_access_size = 1,
    .impl.max_access_size = 2,
};

static const struct SCSIBusInfo esp_scsi_info = {
    .tcq = false,
    .max_target = ESP_MAX_DEVS,
    .max_lun = 7,

    .transfer_data = esp_transfer_data,
    .complete = esp_command_complete,
    .cancel = esp_request_cancelled
};

static void sysbus_esp_gpio_demux(void *opaque, int irq, int level)
{
    SysBusESPState *sysbus = SYSBUS_ESP(opaque);
    ESPState *s = ESP(&sysbus->esp);

    switch (irq) {
    case 0:
        parent_esp_reset(s, irq, level);
        break;
    case 1:
        /* Pass the ESPState, not the opaque SysBusESPState pointer */
        esp_dma_enable(s, irq, level);
        break;
    }
}

static void sysbus_esp_realize(DeviceState *dev, Error **errp)
{
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
    SysBusESPState *sysbus = SYSBUS_ESP(dev);
    ESPState *s = ESP(&sysbus->esp);

    if (!qdev_realize(DEVICE(s), NULL, errp)) {
        return;
    }

    sysbus_init_irq(sbd, &s->irq);
    sysbus_init_irq(sbd, &s->irq_data);
    assert(sysbus->it_shift != -1);

    s->chip_id = TCHI_FAS100A;
    memory_region_init_io(&sysbus->iomem, OBJECT(sysbus), &sysbus_esp_mem_ops,
                          sysbus, "esp-regs", ESP_REGS << sysbus->it_shift);
    sysbus_init_mmio(sbd, &sysbus->iomem);
    memory_region_init_io(&sysbus->pdma, OBJECT(sysbus), &sysbus_esp_pdma_ops,
                          sysbus, "esp-pdma", 4);
    sysbus_init_mmio(sbd, &sysbus->pdma);

    qdev_init_gpio_in(dev, sysbus_esp_gpio_demux, 2);

    scsi_bus_new(&s->bus, sizeof(s->bus), dev, &esp_scsi_info, NULL);
}

static void sysbus_esp_hard_reset(DeviceState *dev)
{
    SysBusESPState *sysbus = SYSBUS_ESP(dev);
    ESPState *s = ESP(&sysbus->esp);

    esp_hard_reset(s);
}

static void sysbus_esp_init(Object *obj)
{
    SysBusESPState *sysbus = SYSBUS_ESP(obj);

    object_initialize_child(obj, "esp", &sysbus->esp, TYPE_ESP);
}

static const VMStateDescription vmstate_sysbus_esp_scsi = {
    .name = "sysbusespscsi",
    .version_id = 2,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8_V(esp.mig_version_id, SysBusESPState, 2),
        VMSTATE_STRUCT(esp, SysBusESPState, 0, vmstate_esp, ESPState),
        VMSTATE_END_OF_LIST()
    }
};

static void sysbus_esp_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = sysbus_esp_realize;
    dc->reset = sysbus_esp_hard_reset;
    dc->vmsd = &vmstate_sysbus_esp_scsi;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}

static const TypeInfo sysbus_esp_info = {
    .name = TYPE_SYSBUS_ESP,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_init = sysbus_esp_init,
    .instance_size = sizeof(SysBusESPState),
    .class_init = sysbus_esp_class_init,
};

static void esp_finalize(Object *obj)
{
    ESPState *s = ESP(obj);

    fifo8_destroy(&s->fifo);
    fifo8_destroy(&s->cmdfifo);
}

static void esp_init(Object *obj)
{
    ESPState *s = ESP(obj);

    fifo8_create(&s->fifo, ESP_FIFO_SZ);
    fifo8_create(&s->cmdfifo, ESP_CMDFIFO_SZ);
}

static void esp_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    /* internal device for sysbusesp/pciespscsi, not user-creatable */
    dc->user_creatable = false;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}

static const TypeInfo esp_info = {
    .name = TYPE_ESP,
    .parent = TYPE_DEVICE,
    .instance_init = esp_init,
    .instance_finalize = esp_finalize,
    .instance_size = sizeof(ESPState),
    .class_init = esp_class_init,
};

static void esp_register_types(void)
{
    type_register_static(&sysbus_esp_info);
    type_register_static(&esp_info);
}

type_init(esp_register_types)