/*
 * QEMU ESP/NCR53C9x emulation
 *
 * Copyright (c) 2005-2006 Fabrice Bellard
 * Copyright (c) 2012 Herve Poussineau
 * Copyright (c) 2023 Mark Cave-Ayland
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "hw/sysbus.h"
#include "migration/vmstate.h"
#include "hw/irq.h"
#include "hw/scsi/esp.h"
#include "trace.h"
#include "qemu/log.h"
#include "qemu/module.h"

/*
 * On Sparc32, this is the ESP (NCR53C90) part of chip STP2000 (Master I/O),
 * also produced as NCR89C100. See
 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR89C100.txt
 * and
 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR53C9X.txt
 *
 * On Macintosh Quadra it is an NCR53C96.
 */

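/*
 * The IRQ and DRQ helpers below latch the current line state (STAT_INT in
 * RSTAT for the interrupt line, drq_state for DRQ) so that callers can
 * invoke them unconditionally: only genuine edges are propagated to the
 * corresponding qemu_irq.
 */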
static void esp_raise_irq(ESPState *s)
{
    if (!(s->rregs[ESP_RSTAT] & STAT_INT)) {
        s->rregs[ESP_RSTAT] |= STAT_INT;
        qemu_irq_raise(s->irq);
        trace_esp_raise_irq();
    }
}

static void esp_lower_irq(ESPState *s)
{
    if (s->rregs[ESP_RSTAT] & STAT_INT) {
        s->rregs[ESP_RSTAT] &= ~STAT_INT;
        qemu_irq_lower(s->irq);
        trace_esp_lower_irq();
    }
}

static void esp_raise_drq(ESPState *s)
{
    if (!(s->drq_state)) {
        qemu_irq_raise(s->drq_irq);
        trace_esp_raise_drq();
        s->drq_state = true;
    }
}

static void esp_lower_drq(ESPState *s)
{
    if (s->drq_state) {
        qemu_irq_lower(s->drq_irq);
        trace_esp_lower_drq();
        s->drq_state = false;
    }
}

static const char *esp_phase_names[8] = {
    "DATA OUT", "DATA IN", "COMMAND", "STATUS",
    "(reserved)", "(reserved)", "MESSAGE OUT", "MESSAGE IN"
};

static void esp_set_phase(ESPState *s, uint8_t phase)
{
    s->rregs[ESP_RSTAT] &= ~7;
    s->rregs[ESP_RSTAT] |= phase;

    trace_esp_set_phase(esp_phase_names[phase]);
}

static uint8_t esp_get_phase(ESPState *s)
{
    return s->rregs[ESP_RSTAT] & 7;
}

void esp_dma_enable(ESPState *s, int irq, int level)
{
    if (level) {
        s->dma_enabled = 1;
        trace_esp_dma_enable();
        if (s->dma_cb) {
            s->dma_cb(s);
            s->dma_cb = NULL;
        }
    } else {
        trace_esp_dma_disable();
        s->dma_enabled = 0;
    }
}

void esp_request_cancelled(SCSIRequest *req)
{
    ESPState *s = req->hba_private;

    if (req == s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
        s->async_len = 0;
    }
}

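/*
 * DRQ follows a simple watermark scheme while a DMA command is in flight:
 * keep DRQ asserted as long as the FIFO can make progress in the current
 * transfer direction, i.e. at least 2 bytes free when the initiator is
 * filling it, or at least 2 bytes queued when it is draining. The 2-byte
 * threshold matches the widest (16-bit) PDMA access implemented below.
 */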
static void esp_update_drq(ESPState *s)
{
    bool to_device;

    switch (esp_get_phase(s)) {
    case STAT_MO:
    case STAT_CD:
    case STAT_DO:
        to_device = true;
        break;

    case STAT_DI:
    case STAT_ST:
    case STAT_MI:
        to_device = false;
        break;

    default:
        return;
    }

    if (s->dma) {
        /* DMA request so update DRQ according to transfer direction */
        if (to_device) {
            if (fifo8_num_free(&s->fifo) < 2) {
                esp_lower_drq(s);
            } else {
                esp_raise_drq(s);
            }
        } else {
            if (fifo8_num_used(&s->fifo) < 2) {
                esp_lower_drq(s);
            } else {
                esp_raise_drq(s);
            }
        }
    } else {
        /* Not a DMA request */
        esp_lower_drq(s);
    }
}

static void esp_fifo_push(ESPState *s, uint8_t val)
{
    if (fifo8_num_used(&s->fifo) == s->fifo.capacity) {
        trace_esp_error_fifo_overrun();
    } else {
        fifo8_push(&s->fifo, val);
    }

    esp_update_drq(s);
}

static void esp_fifo_push_buf(ESPState *s, uint8_t *buf, int len)
{
    fifo8_push_all(&s->fifo, buf, len);
    esp_update_drq(s);
}

static uint8_t esp_fifo_pop(ESPState *s)
{
    uint8_t val;

    if (fifo8_is_empty(&s->fifo)) {
        val = 0;
    } else {
        val = fifo8_pop(&s->fifo);
    }

    esp_update_drq(s);
    return val;
}

static uint32_t esp_fifo_pop_buf(ESPState *s, uint8_t *dest, int maxlen)
{
    uint32_t len = fifo8_pop_buf(&s->fifo, dest, maxlen);

    esp_update_drq(s);
    return len;
}

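/*
 * The transfer counter (TC) is a 24-bit value assembled from the TCLO,
 * TCMID and TCHI registers, e.g. TCHI=0x01, TCMID=0x00, TCLO=0x80 encodes
 * a count of 0x010080 (65664) bytes. STAT_TC is latched in RSTAT when the
 * counter decrements to zero.
 */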
static uint32_t esp_get_tc(ESPState *s)
{
    uint32_t dmalen;

    dmalen = s->rregs[ESP_TCLO];
    dmalen |= s->rregs[ESP_TCMID] << 8;
    dmalen |= s->rregs[ESP_TCHI] << 16;

    return dmalen;
}

static void esp_set_tc(ESPState *s, uint32_t dmalen)
{
    uint32_t old_tc = esp_get_tc(s);

    s->rregs[ESP_TCLO] = dmalen;
    s->rregs[ESP_TCMID] = dmalen >> 8;
    s->rregs[ESP_TCHI] = dmalen >> 16;

    if (old_tc && dmalen == 0) {
        s->rregs[ESP_RSTAT] |= STAT_TC;
    }
}

static uint32_t esp_get_stc(ESPState *s)
{
    uint32_t dmalen;

    dmalen = s->wregs[ESP_TCLO];
    dmalen |= s->wregs[ESP_TCMID] << 8;
    dmalen |= s->wregs[ESP_TCHI] << 16;

    return dmalen;
}

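/*
 * PDMA (pseudo-DMA): instead of the chip mastering the bus, the host CPU
 * moves the data itself through a dedicated MMIO window while the chip is
 * executing a DMA command. A PDMA read simply pops the FIFO; a PDMA write
 * pushes into the FIFO and decrements TC while DRQ is asserted.
 */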
static uint8_t esp_pdma_read(ESPState *s)
{
    return esp_fifo_pop(s);
}

static void esp_pdma_write(ESPState *s, uint8_t val)
{
    uint32_t dmalen = esp_get_tc(s);

    esp_fifo_push(s, val);

    if (dmalen && s->drq_state) {
        dmalen--;
        esp_set_tc(s, dmalen);
    }
}

static int esp_select(ESPState *s)
{
    int target;

    target = s->wregs[ESP_WBUSID] & BUSID_DID;

    s->ti_size = 0;
    s->rregs[ESP_RSEQ] = SEQ_0;

    if (s->current_req) {
        /* Started a new command before the old one finished. Cancel it. */
        scsi_req_cancel(s->current_req);
    }

    s->current_dev = scsi_device_find(&s->bus, 0, target, 0);
    if (!s->current_dev) {
        /* No such drive */
        s->rregs[ESP_RSTAT] = 0;
        s->asc_mode = ESP_ASC_MODE_DIS;
        s->rregs[ESP_RINTR] = INTR_DC;
        esp_raise_irq(s);
        return -1;
    }

    /*
     * Note that we deliberately don't raise the IRQ here: this will be done
     * either in esp_transfer_data() or esp_command_complete()
     */
    s->asc_mode = ESP_ASC_MODE_INI;
    return 0;
}

static void esp_do_dma(ESPState *s);
static void esp_do_nodma(ESPState *s);

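/*
 * COMMAND phase: pop the CDB bytes accumulated in cmdfifo, look up the
 * addressed LUN and enqueue a SCSIRequest. A positive datalen from
 * scsi_req_enqueue() means the target has data for us (DATA IN), a
 * negative one means it expects data (DATA OUT).
 */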
static void do_command_phase(ESPState *s)
{
    uint32_t cmdlen;
    int32_t datalen;
    SCSIDevice *current_lun;
    uint8_t buf[ESP_CMDFIFO_SZ];

    trace_esp_do_command_phase(s->lun);
    cmdlen = fifo8_num_used(&s->cmdfifo);
    if (!cmdlen || !s->current_dev) {
        return;
    }
    fifo8_pop_buf(&s->cmdfifo, buf, cmdlen);

    current_lun = scsi_device_find(&s->bus, 0, s->current_dev->id, s->lun);
    if (!current_lun) {
        /* No such drive */
        s->rregs[ESP_RSTAT] = 0;
        s->asc_mode = ESP_ASC_MODE_DIS;
        s->rregs[ESP_RINTR] = INTR_DC;
        s->rregs[ESP_RSEQ] = SEQ_0;
        esp_raise_irq(s);
        return;
    }

    s->current_req = scsi_req_new(current_lun, 0, s->lun, buf, cmdlen, s);
    datalen = scsi_req_enqueue(s->current_req);
    s->ti_size = datalen;
    fifo8_reset(&s->cmdfifo);
    s->data_ready = false;
    if (datalen != 0) {
        /*
         * Switch to DATA phase but wait until initial data xfer is
         * complete before raising the command completion interrupt
         */
        if (datalen > 0) {
            esp_set_phase(s, STAT_DI);
        } else {
            esp_set_phase(s, STAT_DO);
        }
        scsi_req_continue(s->current_req);
        return;
    }
}

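/*
 * MESSAGE OUT phase: the first byte is the IDENTIFY message whose low 3
 * bits select the LUN; any further (extended) message bytes are dropped.
 */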
static void do_message_phase(ESPState *s)
{
    if (s->cmdfifo_cdb_offset) {
        uint8_t message = fifo8_is_empty(&s->cmdfifo) ? 0 :
                          fifo8_pop(&s->cmdfifo);

        trace_esp_do_identify(message);
        s->lun = message & 7;
        s->cmdfifo_cdb_offset--;
    }

    /* Ignore extended messages for now */
    if (s->cmdfifo_cdb_offset) {
        int len = MIN(s->cmdfifo_cdb_offset, fifo8_num_used(&s->cmdfifo));
        fifo8_drop(&s->cmdfifo, len);
        s->cmdfifo_cdb_offset = 0;
    }
}

static void do_cmd(ESPState *s)
{
    do_message_phase(s);
    assert(s->cmdfifo_cdb_offset == 0);
    do_command_phase(s);
}

static void handle_satn(ESPState *s)
{
    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_satn;
        return;
    }

    if (esp_select(s) < 0) {
        return;
    }

    esp_set_phase(s, STAT_MO);

    if (s->dma) {
        esp_do_dma(s);
    } else {
        esp_do_nodma(s);
    }
}

static void handle_s_without_atn(ESPState *s)
{
    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_s_without_atn;
        return;
    }

    if (esp_select(s) < 0) {
        return;
    }

    esp_set_phase(s, STAT_CD);
    s->cmdfifo_cdb_offset = 0;

    if (s->dma) {
        esp_do_dma(s);
    } else {
        esp_do_nodma(s);
    }
}

static void handle_satn_stop(ESPState *s)
{
    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_satn_stop;
        return;
    }

    if (esp_select(s) < 0) {
        return;
    }

    esp_set_phase(s, STAT_MO);
    s->cmdfifo_cdb_offset = 0;

    if (s->dma) {
        esp_do_dma(s);
    } else {
        esp_do_nodma(s);
    }
}

static void handle_pad(ESPState *s)
{
    if (s->dma) {
        esp_do_dma(s);
    } else {
        esp_do_nodma(s);
    }
}

static void write_response(ESPState *s)
{
    trace_esp_write_response(s->status);

    if (s->dma) {
        esp_do_dma(s);
    } else {
        esp_do_nodma(s);
    }
}

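/*
 * A CDB's total length is encoded in the group bits of its first byte
 * (e.g. group 0 commands are 6 bytes long, groups 1 and 2 are 10 bytes),
 * which is what scsi_cdb_length() derives: a CDB is complete once that
 * many bytes have accumulated after the message bytes in cmdfifo.
 */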
static bool esp_cdb_ready(ESPState *s)
{
    /* scsi_cdb_length() only reads the first byte */
    int limit = s->cmdfifo_cdb_offset + 1;
    int len = fifo8_num_used(&s->cmdfifo);
    const uint8_t *pbuf;
    uint32_t n;
    int cdblen;

    if (len <= 0) {
        return false;
    }

    pbuf = fifo8_peek_bufptr(&s->cmdfifo, len, &n);
    if (n < limit) {
        /*
         * In normal use the cmdfifo should never wrap, but include this check
         * to prevent a malicious guest from reading past the end of the
         * cmdfifo data buffer below
         */
        return false;
    }

    cdblen = scsi_cdb_length((uint8_t *)&pbuf[s->cmdfifo_cdb_offset]);

    return cdblen < 0 ? false : (len >= cdblen);
}

static void esp_dma_ti_check(ESPState *s)
{
    if (esp_get_tc(s) == 0 && fifo8_num_used(&s->fifo) < 2) {
        s->rregs[ESP_RINTR] |= INTR_BS;
        esp_raise_irq(s);
    }
}

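/*
 * Main DMA transfer loop: dispatch on the current SCSI bus phase with len
 * initialised to the remaining TC. Each phase handles both true DMA (the
 * dma_memory_read/dma_memory_write callbacks move data directly to or from
 * guest memory) and PDMA (data is staged through the FIFO by the host CPU).
 */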
static void esp_do_dma(ESPState *s)
{
    uint32_t len, cmdlen;
    uint8_t buf[ESP_CMDFIFO_SZ];

    len = esp_get_tc(s);

    switch (esp_get_phase(s)) {
    case STAT_MO:
        if (s->dma_memory_read) {
            len = MIN(len, fifo8_num_free(&s->cmdfifo));
            if (len) {
                s->dma_memory_read(s->dma_opaque, buf, len);
                esp_set_tc(s, esp_get_tc(s) - len);
            }
        } else {
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
        }

        fifo8_push_all(&s->cmdfifo, buf, len);
        s->cmdfifo_cdb_offset += len;

        switch (s->rregs[ESP_CMD]) {
        case CMD_SELATN | CMD_DMA:
            if (fifo8_num_used(&s->cmdfifo) >= 1) {
                /* First byte received, switch to command phase */
                esp_set_phase(s, STAT_CD);
                s->rregs[ESP_RSEQ] = SEQ_CD;
                s->cmdfifo_cdb_offset = 1;

                if (fifo8_num_used(&s->cmdfifo) > 1) {
                    /* Process any additional command phase data */
                    esp_do_dma(s);
                }
            }
            break;

        case CMD_SELATNS | CMD_DMA:
            if (fifo8_num_used(&s->cmdfifo) == 1) {
                /* First byte received, stop in message out phase */
                s->rregs[ESP_RSEQ] = SEQ_MO;
                s->cmdfifo_cdb_offset = 1;

                /* Raise command completion interrupt */
                s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
                esp_raise_irq(s);
            }
            break;

        case CMD_TI | CMD_DMA:
            /* ATN remains asserted until TC == 0 */
            if (esp_get_tc(s) == 0) {
                esp_set_phase(s, STAT_CD);
                s->rregs[ESP_CMD] = 0;
                s->rregs[ESP_RINTR] |= INTR_BS;
                esp_raise_irq(s);
            }
            break;
        }
        break;

    case STAT_CD:
        cmdlen = fifo8_num_used(&s->cmdfifo);
        trace_esp_do_dma(cmdlen, len);
        if (s->dma_memory_read) {
            len = MIN(len, fifo8_num_free(&s->cmdfifo));
            if (len) {
                s->dma_memory_read(s->dma_opaque, buf, len);
                fifo8_push_all(&s->cmdfifo, buf, len);
                esp_set_tc(s, esp_get_tc(s) - len);
            }
        } else {
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);
        }
        trace_esp_handle_ti_cmd(cmdlen);
        s->ti_size = 0;
        if (esp_get_tc(s) == 0) {
            /* Command has been received */
            do_cmd(s);
        }
        break;

    case STAT_DO:
        if (!s->current_req) {
            return;
        }
        if (s->async_len == 0 && esp_get_tc(s)) {
            /* Defer until data is available. */
            return;
        }
        if (len > s->async_len) {
            len = s->async_len;
        }

        switch (s->rregs[ESP_CMD]) {
        case CMD_TI | CMD_DMA:
            if (s->dma_memory_read) {
                if (len) {
                    s->dma_memory_read(s->dma_opaque, s->async_buf, len);
                    esp_set_tc(s, esp_get_tc(s) - len);
                }
            } else {
                /* Copy FIFO data to device */
                len = MIN(s->async_len, ESP_FIFO_SZ);
                len = MIN(len, fifo8_num_used(&s->fifo));
                len = esp_fifo_pop_buf(s, s->async_buf, len);
            }

            s->async_buf += len;
            s->async_len -= len;
            s->ti_size += len;
            break;

        case CMD_PAD | CMD_DMA:
            /* Copy TC zero bytes into the incoming stream */
            if (!s->dma_memory_read) {
                len = MIN(s->async_len, ESP_FIFO_SZ);
                len = MIN(len, fifo8_num_free(&s->fifo));
            }

            memset(s->async_buf, 0, len);

            s->async_buf += len;
            s->async_len -= len;
            s->ti_size += len;
            break;
        }

        if (s->async_len == 0 && fifo8_num_used(&s->fifo) < 2) {
            /* Defer until the scsi layer has completed */
            scsi_req_continue(s->current_req);
            return;
        }

        esp_dma_ti_check(s);
        break;

    case STAT_DI:
        if (!s->current_req) {
            return;
        }
        if (s->async_len == 0 && esp_get_tc(s)) {
            /* Defer until data is available. */
            return;
        }
        if (len > s->async_len) {
            len = s->async_len;
        }

        switch (s->rregs[ESP_CMD]) {
        case CMD_TI | CMD_DMA:
            if (s->dma_memory_write) {
                if (len) {
                    s->dma_memory_write(s->dma_opaque, s->async_buf, len);
                }
            } else {
                /* Copy device data to FIFO */
                len = MIN(len, fifo8_num_free(&s->fifo));
                esp_fifo_push_buf(s, s->async_buf, len);
            }

            s->async_buf += len;
            s->async_len -= len;
            s->ti_size -= len;
            esp_set_tc(s, esp_get_tc(s) - len);
            break;

        case CMD_PAD | CMD_DMA:
            /* Drop TC bytes from the incoming stream */
            if (!s->dma_memory_write) {
                len = MIN(len, fifo8_num_free(&s->fifo));
            }

            s->async_buf += len;
            s->async_len -= len;
            s->ti_size -= len;
            esp_set_tc(s, esp_get_tc(s) - len);
            break;
        }

        if (s->async_len == 0 && s->ti_size == 0 && esp_get_tc(s)) {
            /* If the guest underflows TC then terminate SCSI request */
            scsi_req_continue(s->current_req);
            return;
        }

        if (s->async_len == 0 && fifo8_num_used(&s->fifo) < 2) {
            /* Defer until the scsi layer has completed */
            scsi_req_continue(s->current_req);
            return;
        }

        esp_dma_ti_check(s);
        break;

    case STAT_ST:
        switch (s->rregs[ESP_CMD]) {
        case CMD_ICCS | CMD_DMA:
            len = MIN(len, 1);

            if (len) {
                buf[0] = s->status;

                if (s->dma_memory_write) {
                    /* Length already non-zero */
                    s->dma_memory_write(s->dma_opaque, buf, len);
                } else {
                    esp_fifo_push_buf(s, buf, len);
                }

                esp_set_tc(s, esp_get_tc(s) - len);
                esp_set_phase(s, STAT_MI);

                if (esp_get_tc(s) > 0) {
                    /* Process any message in phase data */
                    esp_do_dma(s);
                }
            }
            break;

        default:
            /* Consume remaining data if the guest underflows TC */
            if (fifo8_num_used(&s->fifo) < 2) {
                s->rregs[ESP_RINTR] |= INTR_BS;
                esp_raise_irq(s);
            }
            break;
        }
        break;

    case STAT_MI:
        switch (s->rregs[ESP_CMD]) {
        case CMD_ICCS | CMD_DMA:
            len = MIN(len, 1);

            if (len) {
                buf[0] = 0;

                if (s->dma_memory_write) {
                    /* Length already non-zero */
                    s->dma_memory_write(s->dma_opaque, buf, len);
                } else {
                    esp_fifo_push_buf(s, buf, len);
                }

                esp_set_tc(s, esp_get_tc(s) - len);

                /* Raise end of command interrupt */
                s->rregs[ESP_RINTR] |= INTR_FC;
                esp_raise_irq(s);
            }
            break;
        }
        break;
    }
}

static void esp_nodma_ti_dataout(ESPState *s)
{
    int len;

    if (!s->current_req) {
        return;
    }
    if (s->async_len == 0) {
        /* Defer until data is available. */
        return;
    }
    len = MIN(s->async_len, ESP_FIFO_SZ);
    len = MIN(len, fifo8_num_used(&s->fifo));
    esp_fifo_pop_buf(s, s->async_buf, len);
    s->async_buf += len;
    s->async_len -= len;
    s->ti_size += len;

    if (s->async_len == 0) {
        scsi_req_continue(s->current_req);
        return;
    }

    s->rregs[ESP_RINTR] |= INTR_BS;
    esp_raise_irq(s);
}

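/*
 * Programmed-I/O counterpart of esp_do_dma(): data moves strictly through
 * the FIFO under guest register accesses, so each step here transfers at
 * most a FIFO's worth of bytes before an interrupt hands control back to
 * the guest.
 */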
static void esp_do_nodma(ESPState *s)
{
    uint8_t buf[ESP_FIFO_SZ];
    uint32_t cmdlen;
    int len;

    switch (esp_get_phase(s)) {
    case STAT_MO:
        switch (s->rregs[ESP_CMD]) {
        case CMD_SELATN:
            /* Copy FIFO into cmdfifo */
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            if (fifo8_num_used(&s->cmdfifo) >= 1) {
                /* First byte received, switch to command phase */
                esp_set_phase(s, STAT_CD);
                s->rregs[ESP_RSEQ] = SEQ_CD;
                s->cmdfifo_cdb_offset = 1;

                if (fifo8_num_used(&s->cmdfifo) > 1) {
                    /* Process any additional command phase data */
                    esp_do_nodma(s);
                }
            }
            break;

        case CMD_SELATNS:
            /* Copy one byte from FIFO into cmdfifo */
            len = esp_fifo_pop_buf(s, buf,
                                   MIN(fifo8_num_used(&s->fifo), 1));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            if (fifo8_num_used(&s->cmdfifo) >= 1) {
                /* First byte received, stop in message out phase */
                s->rregs[ESP_RSEQ] = SEQ_MO;
                s->cmdfifo_cdb_offset = 1;

                /* Raise command completion interrupt */
                s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
                esp_raise_irq(s);
            }
            break;

        case CMD_TI:
            /* Copy FIFO into cmdfifo */
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            /* ATN remains asserted until FIFO empty */
            s->cmdfifo_cdb_offset = fifo8_num_used(&s->cmdfifo);
            esp_set_phase(s, STAT_CD);
            s->rregs[ESP_CMD] = 0;
            s->rregs[ESP_RINTR] |= INTR_BS;
            esp_raise_irq(s);
            break;
        }
        break;

    case STAT_CD:
        switch (s->rregs[ESP_CMD]) {
        case CMD_TI:
            /* Copy FIFO into cmdfifo */
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            cmdlen = fifo8_num_used(&s->cmdfifo);
            trace_esp_handle_ti_cmd(cmdlen);

            /* CDB may be transferred in one or more TI commands */
            if (esp_cdb_ready(s)) {
                /* Command has been received */
                do_cmd(s);
            } else {
                /*
                 * If data was transferred from the FIFO then raise bus
                 * service interrupt to indicate transfer complete. Otherwise
                 * defer until the next FIFO write.
                 */
                if (len) {
                    /* Raise interrupt to indicate transfer complete */
                    s->rregs[ESP_RINTR] |= INTR_BS;
                    esp_raise_irq(s);
                }
            }
            break;

        case CMD_SEL | CMD_DMA:
        case CMD_SELATN | CMD_DMA:
            /* Copy FIFO into cmdfifo */
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            /* Handle when DMA transfer is terminated by non-DMA FIFO write */
            if (esp_cdb_ready(s)) {
                /* Command has been received */
                do_cmd(s);
            }
            break;

        case CMD_SEL:
        case CMD_SELATN:
            /* FIFO already contains entire CDB: copy to cmdfifo and execute */
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            do_cmd(s);
            break;
        }
        break;

    case STAT_DO:
        /* Accumulate data in FIFO until non-DMA TI is executed */
        break;

    case STAT_DI:
        if (!s->current_req) {
            return;
        }
        if (s->async_len == 0) {
            /* Defer until data is available. */
            return;
        }
        if (fifo8_is_empty(&s->fifo)) {
            esp_fifo_push(s, s->async_buf[0]);
            s->async_buf++;
            s->async_len--;
            s->ti_size--;
        }

        if (s->async_len == 0) {
            scsi_req_continue(s->current_req);
            return;
        }

        /* If preloading the FIFO, defer until TI command issued */
        if (s->rregs[ESP_CMD] != CMD_TI) {
            return;
        }

        s->rregs[ESP_RINTR] |= INTR_BS;
        esp_raise_irq(s);
        break;

    case STAT_ST:
        switch (s->rregs[ESP_CMD]) {
        case CMD_ICCS:
            esp_fifo_push(s, s->status);
            esp_set_phase(s, STAT_MI);

            /* Process any message in phase data */
            esp_do_nodma(s);
            break;
        }
        break;

    case STAT_MI:
        switch (s->rregs[ESP_CMD]) {
        case CMD_ICCS:
            esp_fifo_push(s, 0);

            /* Raise end of command interrupt */
            s->rregs[ESP_RINTR] |= INTR_FC;
            esp_raise_irq(s);
            break;
        }
        break;
    }
}

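/*
 * Completion callback from the SCSI layer: latch the request status,
 * switch the bus to STATUS phase and raise a bus service interrupt so the
 * guest can collect the status and message bytes.
 */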
void esp_command_complete(SCSIRequest *req, size_t resid)
{
    ESPState *s = req->hba_private;
    int to_device = (esp_get_phase(s) == STAT_DO);

    trace_esp_command_complete();

    /*
     * Non-DMA transfers from the target will leave the last byte in
     * the FIFO so don't reset ti_size in this case
     */
    if (s->dma || to_device) {
        if (s->ti_size != 0) {
            trace_esp_command_complete_unexpected();
        }
    }

    s->async_len = 0;
    if (req->status) {
        trace_esp_command_complete_fail();
    }
    s->status = req->status;

    /*
     * Switch to status phase. For non-DMA transfers from the target the last
     * byte is still in the FIFO
     */
    s->ti_size = 0;

    switch (s->rregs[ESP_CMD]) {
    case CMD_SEL | CMD_DMA:
    case CMD_SEL:
    case CMD_SELATN | CMD_DMA:
    case CMD_SELATN:
        /*
         * No data phase for sequencer command so raise deferred bus service
         * and function complete interrupt
         */
        s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        break;

    case CMD_TI | CMD_DMA:
    case CMD_TI:
        s->rregs[ESP_CMD] = 0;
        break;
    }

    /* Raise bus service interrupt to indicate change to STATUS phase */
    esp_set_phase(s, STAT_ST);
    s->rregs[ESP_RINTR] |= INTR_BS;
    esp_raise_irq(s);

    if (s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
    }
}

void esp_transfer_data(SCSIRequest *req, uint32_t len)
{
    ESPState *s = req->hba_private;
    uint32_t dmalen = esp_get_tc(s);

    trace_esp_transfer_data(dmalen, s->ti_size);
    s->async_len = len;
    s->async_buf = scsi_req_get_buf(req);

    if (!s->data_ready) {
        s->data_ready = true;

        switch (s->rregs[ESP_CMD]) {
        case CMD_SEL | CMD_DMA:
        case CMD_SEL:
        case CMD_SELATN | CMD_DMA:
        case CMD_SELATN:
            /*
             * Initial incoming data xfer is complete for sequencer command
             * so raise deferred bus service and function complete interrupt
             */
            s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
            s->rregs[ESP_RSEQ] = SEQ_CD;
            esp_raise_irq(s);
            break;

        case CMD_SELATNS | CMD_DMA:
        case CMD_SELATNS:
            /*
             * Initial incoming data xfer is complete so raise command
             * completion interrupt
             */
            s->rregs[ESP_RINTR] |= INTR_BS;
            s->rregs[ESP_RSEQ] = SEQ_MO;
            esp_raise_irq(s);
            break;

        case CMD_TI | CMD_DMA:
        case CMD_TI:
            /*
             * If the final COMMAND phase data was transferred using a TI
             * command, clear ESP_CMD to terminate the TI command and raise
             * the completion interrupt
             */
            s->rregs[ESP_CMD] = 0;
            s->rregs[ESP_RINTR] |= INTR_BS;
            esp_raise_irq(s);
            break;
        }
    }

    /*
     * Always perform the initial transfer upon reception of the next TI
     * command to ensure the DMA/non-DMA status of the command is correct.
     * It is not possible to use s->dma directly in the section below as
     * some OSs send non-DMA NOP commands after a DMA transfer. Hence if the
     * async data transfer is delayed then s->dma is set incorrectly.
     */

    if (s->rregs[ESP_CMD] == (CMD_TI | CMD_DMA)) {
        /* When the SCSI layer returns more data, raise deferred INTR_BS */
        esp_dma_ti_check(s);

        esp_do_dma(s);
    } else if (s->rregs[ESP_CMD] == CMD_TI) {
        esp_do_nodma(s);
    }
}

static void handle_ti(ESPState *s)
{
    uint32_t dmalen;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_ti;
        return;
    }

    if (s->dma) {
        dmalen = esp_get_tc(s);
        trace_esp_handle_ti(dmalen);
        esp_do_dma(s);
    } else {
        trace_esp_handle_ti(s->ti_size);
        esp_do_nodma(s);

        if (esp_get_phase(s) == STAT_DO) {
            esp_nodma_ti_dataout(s);
        }
    }
}

void esp_hard_reset(ESPState *s)
{
    memset(s->rregs, 0, ESP_REGS);
    memset(s->wregs, 0, ESP_REGS);
    s->tchi_written = 0;
    s->ti_size = 0;
    s->async_len = 0;
    fifo8_reset(&s->fifo);
    fifo8_reset(&s->cmdfifo);
    s->dma = 0;
    s->dma_cb = NULL;
    s->asc_mode = ESP_ASC_MODE_DIS;

    s->rregs[ESP_CFG1] = 7;
}

static void esp_soft_reset(ESPState *s)
{
    qemu_irq_lower(s->irq);
    qemu_irq_lower(s->drq_irq);
    esp_hard_reset(s);
}

static void esp_bus_reset(ESPState *s)
{
    bus_cold_reset(BUS(&s->bus));
}

static void parent_esp_reset(ESPState *s, int irq, int level)
{
    if (level) {
        esp_soft_reset(s);
    }
}

static bool esp_cmd_is_valid(ESPState *s, uint8_t cmd)
{
    uint8_t cmd_group = (cmd & CMD_GRP_MASK) >> 4;

    /* Always allow misc commands */
    if (cmd_group == CMD_GRP_MISC) {
        return true;
    }

    switch (s->asc_mode) {
    case ESP_ASC_MODE_DIS:
        /* Disconnected mode: only allow disconnected commands */
        if (cmd_group == CMD_GRP_DISC) {
            return true;
        }
        break;

    case ESP_ASC_MODE_INI:
        /* Initiator mode: allow initiator commands */
        if (cmd_group == CMD_GRP_INIT) {
            return true;
        }
        break;

    default:
        g_assert_not_reached();
    }

    trace_esp_invalid_cmd(cmd, s->asc_mode);
    return false;
}

static void esp_run_cmd(ESPState *s)
{
    uint8_t cmd = s->rregs[ESP_CMD];

    if (cmd & CMD_DMA) {
        s->dma = 1;
        /* Reload DMA counter. */
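        /* A start transfer count of zero selects the maximum of 65536 bytes */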
        if (esp_get_stc(s) == 0) {
            esp_set_tc(s, 0x10000);
        } else {
            esp_set_tc(s, esp_get_stc(s));
        }
    } else {
        s->dma = 0;
    }
    switch (cmd & CMD_CMD) {
    case CMD_NOP:
        trace_esp_mem_writeb_cmd_nop(cmd);
        break;
    case CMD_FLUSH:
        trace_esp_mem_writeb_cmd_flush(cmd);
        fifo8_reset(&s->fifo);
        break;
    case CMD_RESET:
        trace_esp_mem_writeb_cmd_reset(cmd);
        esp_soft_reset(s);
        break;
    case CMD_BUSRESET:
        trace_esp_mem_writeb_cmd_bus_reset(cmd);
        esp_bus_reset(s);
        if (!(s->wregs[ESP_CFG1] & CFG1_RESREPT)) {
            s->rregs[ESP_RINTR] |= INTR_RST;
            esp_raise_irq(s);
        }
        break;
    case CMD_TI:
        trace_esp_mem_writeb_cmd_ti(cmd);
        handle_ti(s);
        break;
    case CMD_ICCS:
        trace_esp_mem_writeb_cmd_iccs(cmd);
        write_response(s);
        break;
    case CMD_MSGACC:
        trace_esp_mem_writeb_cmd_msgacc(cmd);
        s->asc_mode = ESP_ASC_MODE_DIS;
        s->rregs[ESP_RINTR] |= INTR_DC;
        s->rregs[ESP_RSEQ] = 0;
        s->rregs[ESP_RFLAGS] = 0;
        esp_raise_irq(s);
        break;
    case CMD_PAD:
        trace_esp_mem_writeb_cmd_pad(cmd);
        handle_pad(s);
        break;
    case CMD_SATN:
        trace_esp_mem_writeb_cmd_satn(cmd);
        break;
    case CMD_RSTATN:
        trace_esp_mem_writeb_cmd_rstatn(cmd);
        break;
    case CMD_SEL:
        trace_esp_mem_writeb_cmd_sel(cmd);
        handle_s_without_atn(s);
        break;
    case CMD_SELATN:
        trace_esp_mem_writeb_cmd_selatn(cmd);
        handle_satn(s);
        break;
    case CMD_SELATNS:
        trace_esp_mem_writeb_cmd_selatns(cmd);
        handle_satn_stop(s);
        break;
    case CMD_ENSEL:
        trace_esp_mem_writeb_cmd_ensel(cmd);
        s->rregs[ESP_RINTR] = 0;
        break;
    case CMD_DISSEL:
        trace_esp_mem_writeb_cmd_dissel(cmd);
        s->rregs[ESP_RINTR] = 0;
        esp_raise_irq(s);
        break;
    default:
        trace_esp_error_unhandled_command(cmd);
        break;
    }
}

uint64_t esp_reg_read(ESPState *s, uint32_t saddr)
{
    uint32_t val;

    switch (saddr) {
    case ESP_FIFO:
        s->rregs[ESP_FIFO] = esp_fifo_pop(s);
        val = s->rregs[ESP_FIFO];
        break;
    case ESP_RINTR:
        /*
         * Clear sequence step, interrupt register and all status bits
         * except TC
         */
        val = s->rregs[ESP_RINTR];
        s->rregs[ESP_RINTR] = 0;
        esp_lower_irq(s);
        s->rregs[ESP_RSTAT] &= STAT_TC | 7;
        /*
         * According to the datasheet ESP_RSEQ should be cleared, but as the
         * emulation currently defers information transfers to the next TI
         * command leave it for now so that pedantic guests such as the old
         * Linux 2.6 driver see the correct flags before the next SCSI phase
         * transition.
         *
         * s->rregs[ESP_RSEQ] = SEQ_0;
         */
        break;
    case ESP_TCHI:
        /* Return the unique id if the value has never been written */
        if (!s->tchi_written) {
            val = s->chip_id;
        } else {
            val = s->rregs[saddr];
        }
        break;
    case ESP_RFLAGS:
        /* Bottom 5 bits indicate number of bytes in FIFO */
        val = fifo8_num_used(&s->fifo);
        break;
    default:
        val = s->rregs[saddr];
        break;
    }

    trace_esp_mem_readb(saddr, val);
    return val;
}

void esp_reg_write(ESPState *s, uint32_t saddr, uint64_t val)
{
    trace_esp_mem_writeb(saddr, s->wregs[saddr], val);
    switch (saddr) {
    case ESP_TCHI:
        s->tchi_written = true;
        /* fall through */
    case ESP_TCLO:
    case ESP_TCMID:
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        break;
    case ESP_FIFO:
        if (!fifo8_is_full(&s->fifo)) {
            esp_fifo_push(s, val);
        }
        esp_do_nodma(s);
        break;
    case ESP_CMD:
        s->rregs[saddr] = val;
        if (!esp_cmd_is_valid(s, s->rregs[saddr])) {
            s->rregs[ESP_RSTAT] |= INTR_IL;
            esp_raise_irq(s);
            break;
        }
        esp_run_cmd(s);
        break;
    case ESP_WBUSID ... ESP_WSYNO:
        break;
    case ESP_CFG1:
    case ESP_CFG2: case ESP_CFG3:
    case ESP_RES3: case ESP_RES4:
        s->rregs[saddr] = val;
        break;
    case ESP_WCCF ... ESP_WTEST:
        break;
    default:
        trace_esp_error_invalid_write(val, saddr);
        return;
    }
    s->wregs[saddr] = val;
}

static bool esp_mem_accepts(void *opaque, hwaddr addr,
                            unsigned size, bool is_write,
                            MemTxAttrs attrs)
{
    return (size == 1) || (is_write && size == 4);
}

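/*
 * The vmstate version tests below clamp the incoming stream version to the
 * version recorded by the source in mig_version_id: ESPState is embedded
 * as a VMSTATE_STRUCT whose version is pinned by its parent, so the lower
 * of the two values decides which fields are present on the wire.
 */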
static bool esp_is_before_version_5(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);

    version_id = MIN(version_id, s->mig_version_id);
    return version_id < 5;
}

static bool esp_is_version_5(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);

    version_id = MIN(version_id, s->mig_version_id);
    return version_id >= 5;
}

static bool esp_is_version_6(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);

    version_id = MIN(version_id, s->mig_version_id);
    return version_id >= 6;
}

static bool esp_is_between_version_5_and_6(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);

    version_id = MIN(version_id, s->mig_version_id);
    return version_id >= 5 && version_id <= 6;
}

static bool esp_is_version_8(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);

    version_id = MIN(version_id, s->mig_version_id);
    return version_id >= 8;
}

int esp_pre_save(void *opaque)
{
    ESPState *s = ESP(object_resolve_path_component(
                      OBJECT(opaque), "esp"));

    s->mig_version_id = vmstate_esp.version_id;
    return 0;
}

static int esp_post_load(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);
    int len, i;

    version_id = MIN(version_id, s->mig_version_id);

    if (version_id < 5) {
        esp_set_tc(s, s->mig_dma_left);

        /* Migrate ti_buf to fifo */
        len = s->mig_ti_wptr - s->mig_ti_rptr;
        for (i = 0; i < len; i++) {
            fifo8_push(&s->fifo, s->mig_ti_buf[i]);
        }

        /* Migrate cmdbuf to cmdfifo */
        for (i = 0; i < s->mig_cmdlen; i++) {
            fifo8_push(&s->cmdfifo, s->mig_cmdbuf[i]);
        }
    }

    if (version_id < 8) {
        /* Assume initiator mode to allow all commands to continue */
        s->asc_mode = ESP_ASC_MODE_INI;
    }

    s->mig_version_id = vmstate_esp.version_id;
    return 0;
}

const VMStateDescription vmstate_esp = {
    .name = "esp",
    .version_id = 8,
    .minimum_version_id = 3,
    .post_load = esp_post_load,
    .fields = (const VMStateField[]) {
        VMSTATE_BUFFER(rregs, ESPState),
        VMSTATE_BUFFER(wregs, ESPState),
        VMSTATE_INT32(ti_size, ESPState),
        VMSTATE_UINT32_TEST(mig_ti_rptr, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32_TEST(mig_ti_wptr, ESPState, esp_is_before_version_5),
        VMSTATE_BUFFER_TEST(mig_ti_buf, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32(status, ESPState),
        VMSTATE_UINT32_TEST(mig_deferred_status, ESPState,
                            esp_is_before_version_5),
        VMSTATE_BOOL_TEST(mig_deferred_complete, ESPState,
                          esp_is_before_version_5),
        VMSTATE_UINT32(dma, ESPState),
        VMSTATE_STATIC_BUFFER(mig_cmdbuf, ESPState, 0,
                              esp_is_before_version_5, 0, 16),
        VMSTATE_STATIC_BUFFER(mig_cmdbuf, ESPState, 4,
                              esp_is_before_version_5, 16,
                              sizeof(typeof_field(ESPState, mig_cmdbuf))),
        VMSTATE_UINT32_TEST(mig_cmdlen, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32(do_cmd, ESPState),
        VMSTATE_UINT32_TEST(mig_dma_left, ESPState, esp_is_before_version_5),
        VMSTATE_BOOL_TEST(data_ready, ESPState, esp_is_version_5),
        VMSTATE_UINT8_TEST(cmdfifo_cdb_offset, ESPState, esp_is_version_5),
        VMSTATE_FIFO8_TEST(fifo, ESPState, esp_is_version_5),
        VMSTATE_FIFO8_TEST(cmdfifo, ESPState, esp_is_version_5),
        VMSTATE_UINT8_TEST(mig_ti_cmd, ESPState,
                           esp_is_between_version_5_and_6),
        VMSTATE_UINT8_TEST(lun, ESPState, esp_is_version_6),
        VMSTATE_BOOL(drq_state, ESPState),
        VMSTATE_UINT8_TEST(asc_mode, ESPState, esp_is_version_8),
        VMSTATE_END_OF_LIST()
    },
};

static void sysbus_esp_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);
    uint32_t saddr;

    saddr = addr >> sysbus->it_shift;
    esp_reg_write(s, saddr, val);
}

static uint64_t sysbus_esp_mem_read(void *opaque, hwaddr addr,
                                    unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);
    uint32_t saddr;

    saddr = addr >> sysbus->it_shift;
    return esp_reg_read(s, saddr);
}

static const MemoryRegionOps sysbus_esp_mem_ops = {
    .read = sysbus_esp_mem_read,
    .write = sysbus_esp_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.accepts = esp_mem_accepts,
};

static void sysbus_esp_pdma_write(void *opaque, hwaddr addr,
                                  uint64_t val, unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);

    trace_esp_pdma_write(size);

    switch (size) {
    case 1:
        esp_pdma_write(s, val);
        break;
    case 2:
        esp_pdma_write(s, val >> 8);
        esp_pdma_write(s, val);
        break;
    }
    esp_do_dma(s);
}

static uint64_t sysbus_esp_pdma_read(void *opaque, hwaddr addr,
                                     unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);
    uint64_t val = 0;

    trace_esp_pdma_read(size);

    switch (size) {
    case 1:
        val = esp_pdma_read(s);
        break;
    case 2:
        val = esp_pdma_read(s);
        val = (val << 8) | esp_pdma_read(s);
        break;
    }
    esp_do_dma(s);
    return val;
}

static void *esp_load_request(QEMUFile *f, SCSIRequest *req)
{
    ESPState *s = container_of(req->bus, ESPState, bus);

    scsi_req_ref(req);
    s->current_req = req;
    return s;
}

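/*
 * The PDMA region accepts 1-4 byte accesses but implements only 1- and
 * 2-byte operations: with impl.max_access_size = 2 the memory core splits
 * a 32-bit access into two 16-bit ones, matching the byte-pair handling in
 * sysbus_esp_pdma_read/write above.
 */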
static const MemoryRegionOps sysbus_esp_pdma_ops = {
    .read = sysbus_esp_pdma_read,
    .write = sysbus_esp_pdma_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.min_access_size = 1,
    .valid.max_access_size = 4,
    .impl.min_access_size = 1,
    .impl.max_access_size = 2,
};

static const struct SCSIBusInfo esp_scsi_info = {
    .tcq = false,
    .max_target = ESP_MAX_DEVS,
    .max_lun = 7,

    .load_request = esp_load_request,
    .transfer_data = esp_transfer_data,
    .complete = esp_command_complete,
    .cancel = esp_request_cancelled
};

static void sysbus_esp_gpio_demux(void *opaque, int irq, int level)
{
    SysBusESPState *sysbus = SYSBUS_ESP(opaque);
    ESPState *s = ESP(&sysbus->esp);

    switch (irq) {
    case 0:
        parent_esp_reset(s, irq, level);
        break;
    case 1:
        esp_dma_enable(s, irq, level);
        break;
    }
}

static void sysbus_esp_realize(DeviceState *dev, Error **errp)
{
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
    SysBusESPState *sysbus = SYSBUS_ESP(dev);
    ESPState *s = ESP(&sysbus->esp);

    if (!qdev_realize(DEVICE(s), NULL, errp)) {
        return;
    }

    sysbus_init_irq(sbd, &s->irq);
    sysbus_init_irq(sbd, &s->drq_irq);
    assert(sysbus->it_shift != -1);

    s->chip_id = TCHI_FAS100A;
    memory_region_init_io(&sysbus->iomem, OBJECT(sysbus), &sysbus_esp_mem_ops,
                          sysbus, "esp-regs", ESP_REGS << sysbus->it_shift);
    sysbus_init_mmio(sbd, &sysbus->iomem);
    memory_region_init_io(&sysbus->pdma, OBJECT(sysbus), &sysbus_esp_pdma_ops,
                          sysbus, "esp-pdma", 4);
    sysbus_init_mmio(sbd, &sysbus->pdma);

    qdev_init_gpio_in(dev, sysbus_esp_gpio_demux, 2);

    scsi_bus_init(&s->bus, sizeof(s->bus), dev, &esp_scsi_info);
}

static void sysbus_esp_hard_reset(DeviceState *dev)
{
    SysBusESPState *sysbus = SYSBUS_ESP(dev);
    ESPState *s = ESP(&sysbus->esp);

    esp_hard_reset(s);
}

static void sysbus_esp_init(Object *obj)
{
    SysBusESPState *sysbus = SYSBUS_ESP(obj);

    object_initialize_child(obj, "esp", &sysbus->esp, TYPE_ESP);
}

static const VMStateDescription vmstate_sysbus_esp_scsi = {
    .name = "sysbusespscsi",
    .version_id = 2,
    .minimum_version_id = 1,
    .pre_save = esp_pre_save,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT8_V(esp.mig_version_id, SysBusESPState, 2),
        VMSTATE_STRUCT(esp, SysBusESPState, 0, vmstate_esp, ESPState),
        VMSTATE_END_OF_LIST()
    }
};

static void sysbus_esp_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = sysbus_esp_realize;
    device_class_set_legacy_reset(dc, sysbus_esp_hard_reset);
    dc->vmsd = &vmstate_sysbus_esp_scsi;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}

static void esp_finalize(Object *obj)
{
    ESPState *s = ESP(obj);

    fifo8_destroy(&s->fifo);
    fifo8_destroy(&s->cmdfifo);
}

static void esp_init(Object *obj)
{
    ESPState *s = ESP(obj);

    fifo8_create(&s->fifo, ESP_FIFO_SZ);
    fifo8_create(&s->cmdfifo, ESP_CMDFIFO_SZ);
}

static void esp_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    /* internal device for sysbusesp/pciespscsi, not user-creatable */
    dc->user_creatable = false;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}

static const TypeInfo esp_info_types[] = {
    {
        .name = TYPE_SYSBUS_ESP,
        .parent = TYPE_SYS_BUS_DEVICE,
        .instance_init = sysbus_esp_init,
        .instance_size = sizeof(SysBusESPState),
        .class_init = sysbus_esp_class_init,
    },
    {
        .name = TYPE_ESP,
        .parent = TYPE_DEVICE,
        .instance_init = esp_init,
        .instance_finalize = esp_finalize,
        .instance_size = sizeof(ESPState),
        .class_init = esp_class_init,
    },
};

DEFINE_TYPES(esp_info_types)