/*
 * QEMU ESP/NCR53C9x emulation
 *
 * Copyright (c) 2005-2006 Fabrice Bellard
 * Copyright (c) 2012 Herve Poussineau
 * Copyright (c) 2023 Mark Cave-Ayland
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "hw/sysbus.h"
#include "migration/vmstate.h"
#include "hw/irq.h"
#include "hw/scsi/esp.h"
#include "trace.h"
#include "qemu/log.h"
#include "qemu/module.h"

/*
 * On Sparc32, this is the ESP (NCR53C90) part of chip STP2000 (Master I/O),
 * also produced as NCR89C100. See
 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR89C100.txt
 * and
 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR53C9X.txt
 *
 * On Macintosh Quadra it is a NCR53C96.
 */

static void esp_raise_irq(ESPState *s)
{
    if (!(s->rregs[ESP_RSTAT] & STAT_INT)) {
        s->rregs[ESP_RSTAT] |= STAT_INT;
        qemu_irq_raise(s->irq);
        trace_esp_raise_irq();
    }
}

static void esp_lower_irq(ESPState *s)
{
    if (s->rregs[ESP_RSTAT] & STAT_INT) {
        s->rregs[ESP_RSTAT] &= ~STAT_INT;
        qemu_irq_lower(s->irq);
        trace_esp_lower_irq();
    }
}

static void esp_raise_drq(ESPState *s)
{
    if (!(s->drq_state)) {
        qemu_irq_raise(s->drq_irq);
        trace_esp_raise_drq();
        s->drq_state = true;
    }
}

static void esp_lower_drq(ESPState *s)
{
    if (s->drq_state) {
        qemu_irq_lower(s->drq_irq);
        trace_esp_lower_drq();
        s->drq_state = false;
    }
}

static const char *esp_phase_names[8] = {
    "DATA OUT", "DATA IN", "COMMAND", "STATUS",
    "(reserved)", "(reserved)", "MESSAGE OUT", "MESSAGE IN"
};

static void esp_set_phase(ESPState *s, uint8_t phase)
{
    s->rregs[ESP_RSTAT] &= ~7;
    s->rregs[ESP_RSTAT] |= phase;

    trace_esp_set_phase(esp_phase_names[phase]);
}

static uint8_t esp_get_phase(ESPState *s)
{
    return s->rregs[ESP_RSTAT] & 7;
}

void esp_dma_enable(ESPState *s, int irq, int level)
{
    if (level) {
        s->dma_enabled = 1;
        trace_esp_dma_enable();
        if (s->dma_cb) {
            s->dma_cb(s);
            s->dma_cb = NULL;
        }
    } else {
        trace_esp_dma_disable();
        s->dma_enabled = 0;
    }
}

void esp_request_cancelled(SCSIRequest *req)
{
    ESPState *s = req->hba_private;

    if (req == s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
        s->async_len = 0;
    }
}

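/*
 * DRQ tracks whether the external DMA engine can usefully move data: for
 * transfers towards the device it is raised while the FIFO has room, and
 * for transfers from the device while the FIFO holds data (the two-byte
 * margin presumably accommodates 16-bit PDMA accesses). Non-DMA transfers
 * never assert DRQ.
 */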
static void esp_update_drq(ESPState *s)
{
    bool to_device;

    switch (esp_get_phase(s)) {
    case STAT_MO:
    case STAT_CD:
    case STAT_DO:
        to_device = true;
        break;

    case STAT_DI:
    case STAT_ST:
    case STAT_MI:
        to_device = false;
        break;

    default:
        return;
    }

    if (s->dma) {
        /* DMA request so update DRQ according to transfer direction */
        if (to_device) {
            if (fifo8_num_free(&s->fifo) < 2) {
                esp_lower_drq(s);
            } else {
                esp_raise_drq(s);
            }
        } else {
            if (fifo8_num_used(&s->fifo) < 2) {
                esp_lower_drq(s);
            } else {
                esp_raise_drq(s);
            }
        }
    } else {
        /* Not a DMA request */
        esp_lower_drq(s);
    }
}

static void esp_fifo_push(ESPState *s, uint8_t val)
{
    if (fifo8_num_used(&s->fifo) == s->fifo.capacity) {
        trace_esp_error_fifo_overrun();
    } else {
        fifo8_push(&s->fifo, val);
    }

    esp_update_drq(s);
}

static void esp_fifo_push_buf(ESPState *s, uint8_t *buf, int len)
{
    fifo8_push_all(&s->fifo, buf, len);
    esp_update_drq(s);
}

static uint8_t esp_fifo_pop(ESPState *s)
{
    uint8_t val;

    if (fifo8_is_empty(&s->fifo)) {
        val = 0;
    } else {
        val = fifo8_pop(&s->fifo);
    }

    esp_update_drq(s);
    return val;
}

static uint32_t esp_fifo_pop_buf(ESPState *s, uint8_t *dest, int maxlen)
{
    uint32_t len = fifo8_pop_buf(&s->fifo, dest, maxlen);

    esp_update_drq(s);
    return len;
}

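/*
 * The transfer counter (TC) is a 24-bit value spread across the TCLO, TCMID
 * and TCHI registers. Writes latch into wregs (the start counter, STC) and
 * are only loaded into the live counter in rregs when a DMA command starts;
 * STAT_TC is set once a transfer decrements the live counter to zero.
 */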
static uint32_t esp_get_tc(ESPState *s)
{
    uint32_t dmalen;

    dmalen = s->rregs[ESP_TCLO];
    dmalen |= s->rregs[ESP_TCMID] << 8;
    dmalen |= s->rregs[ESP_TCHI] << 16;

    return dmalen;
}

static void esp_set_tc(ESPState *s, uint32_t dmalen)
{
    uint32_t old_tc = esp_get_tc(s);

    s->rregs[ESP_TCLO] = dmalen;
    s->rregs[ESP_TCMID] = dmalen >> 8;
    s->rregs[ESP_TCHI] = dmalen >> 16;

    if (old_tc && dmalen == 0) {
        s->rregs[ESP_RSTAT] |= STAT_TC;
    }
}

static uint32_t esp_get_stc(ESPState *s)
{
    uint32_t dmalen;

    dmalen = s->wregs[ESP_TCLO];
    dmalen |= s->wregs[ESP_TCMID] << 8;
    dmalen |= s->wregs[ESP_TCHI] << 16;

    return dmalen;
}

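/*
 * PDMA (pseudo-DMA) accessors: the CPU moves data itself through a separate
 * MMIO region, one or two bytes at a time, while the transfer counter is
 * kept up to date as if a real DMA engine were driving the transfer.
 */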
static uint8_t esp_pdma_read(ESPState *s)
{
    uint8_t val;

    val = esp_fifo_pop(s);
    return val;
}

static void esp_pdma_write(ESPState *s, uint8_t val)
{
    uint32_t dmalen = esp_get_tc(s);

    esp_fifo_push(s, val);

    if (dmalen && s->drq_state) {
        dmalen--;
        esp_set_tc(s, dmalen);
    }
}

static int esp_select(ESPState *s)
{
    int target;

    target = s->wregs[ESP_WBUSID] & BUSID_DID;

    s->ti_size = 0;
    s->rregs[ESP_RSEQ] = SEQ_0;

    if (s->current_req) {
        /* Started a new command before the old one finished. Cancel it. */
        scsi_req_cancel(s->current_req);
    }

    s->current_dev = scsi_device_find(&s->bus, 0, target, 0);
    if (!s->current_dev) {
        /* No such drive */
        s->rregs[ESP_RSTAT] = 0;
        s->rregs[ESP_RINTR] = INTR_DC;
        esp_raise_irq(s);
        return -1;
    }

    /*
     * Note that we deliberately don't raise the IRQ here: this will be done
     * either in esp_transfer_data() or esp_command_complete()
     */
    return 0;
}

static void esp_do_dma(ESPState *s);
static void esp_do_nodma(ESPState *s);

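/*
 * Hand the CDB accumulated in cmdfifo over to the QEMU SCSI layer. As with
 * esp_select(), a missing LUN is reported back as a disconnect (INTR_DC).
 */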
static void do_command_phase(ESPState *s)
{
    uint32_t cmdlen;
    int32_t datalen;
    SCSIDevice *current_lun;
    uint8_t buf[ESP_CMDFIFO_SZ];

    trace_esp_do_command_phase(s->lun);
    cmdlen = fifo8_num_used(&s->cmdfifo);
    if (!cmdlen || !s->current_dev) {
        return;
    }
    fifo8_pop_buf(&s->cmdfifo, buf, cmdlen);

    current_lun = scsi_device_find(&s->bus, 0, s->current_dev->id, s->lun);
    if (!current_lun) {
        /* No such drive */
        s->rregs[ESP_RSTAT] = 0;
        s->rregs[ESP_RINTR] = INTR_DC;
        s->rregs[ESP_RSEQ] = SEQ_0;
        esp_raise_irq(s);
        return;
    }

    s->current_req = scsi_req_new(current_lun, 0, s->lun, buf, cmdlen, s);
    datalen = scsi_req_enqueue(s->current_req);
    s->ti_size = datalen;
    fifo8_reset(&s->cmdfifo);
    s->data_ready = false;
    if (datalen != 0) {
        /*
         * Switch to DATA phase but wait until initial data xfer is
         * complete before raising the command completion interrupt
         */
        if (datalen > 0) {
            esp_set_phase(s, STAT_DI);
        } else {
            esp_set_phase(s, STAT_DO);
        }
        scsi_req_continue(s->current_req);
        return;
    }
}

static void do_message_phase(ESPState *s)
{
    if (s->cmdfifo_cdb_offset) {
        uint8_t message = fifo8_is_empty(&s->cmdfifo) ? 0 :
                          fifo8_pop(&s->cmdfifo);

        trace_esp_do_identify(message);
        s->lun = message & 7;
        s->cmdfifo_cdb_offset--;
    }

    /* Ignore extended messages for now */
    if (s->cmdfifo_cdb_offset) {
        int len = MIN(s->cmdfifo_cdb_offset, fifo8_num_used(&s->cmdfifo));
        fifo8_drop(&s->cmdfifo, len);
        s->cmdfifo_cdb_offset = 0;
    }
}

static void do_cmd(ESPState *s)
{
    do_message_phase(s);
    assert(s->cmdfifo_cdb_offset == 0);
    do_command_phase(s);
}

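/*
 * The SELECT command handlers below may run before the DMA engine has been
 * enabled by the board: in that case the handler is stashed in dma_cb and
 * replayed from esp_dma_enable() once DMA is switched on.
 */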
static void handle_satn(ESPState *s)
{
    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_satn;
        return;
    }

    if (esp_select(s) < 0) {
        return;
    }

    esp_set_phase(s, STAT_MO);

    if (s->dma) {
        esp_do_dma(s);
    } else {
        esp_do_nodma(s);
    }
}

static void handle_s_without_atn(ESPState *s)
{
    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_s_without_atn;
        return;
    }

    if (esp_select(s) < 0) {
        return;
    }

    esp_set_phase(s, STAT_CD);
    s->cmdfifo_cdb_offset = 0;

    if (s->dma) {
        esp_do_dma(s);
    } else {
        esp_do_nodma(s);
    }
}

static void handle_satn_stop(ESPState *s)
{
    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_satn_stop;
        return;
    }

    if (esp_select(s) < 0) {
        return;
    }

    esp_set_phase(s, STAT_MO);
    s->cmdfifo_cdb_offset = 0;

    if (s->dma) {
        esp_do_dma(s);
    } else {
        esp_do_nodma(s);
    }
}

static void handle_pad(ESPState *s)
{
    if (s->dma) {
        esp_do_dma(s);
    } else {
        esp_do_nodma(s);
    }
}

static void write_response(ESPState *s)
{
    trace_esp_write_response(s->status);

    if (s->dma) {
        esp_do_dma(s);
    } else {
        esp_do_nodma(s);
    }
}

static bool esp_cdb_ready(ESPState *s)
{
    int len = fifo8_num_used(&s->cmdfifo) - s->cmdfifo_cdb_offset;
    const uint8_t *pbuf;
    uint32_t n;
    int cdblen;

    if (len <= 0) {
        return false;
    }

    pbuf = fifo8_peek_bufptr(&s->cmdfifo, len, &n);
    if (n < len) {
        /*
         * In normal use the cmdfifo should never wrap, but include this check
         * to prevent a malicious guest from reading past the end of the
         * cmdfifo data buffer below
         */
        return false;
    }

    cdblen = scsi_cdb_length((uint8_t *)&pbuf[s->cmdfifo_cdb_offset]);

    return cdblen < 0 ? false : (len >= cdblen);
}

static void esp_dma_ti_check(ESPState *s)
{
    if (esp_get_tc(s) == 0 && fifo8_num_used(&s->fifo) < 2) {
        s->rregs[ESP_RINTR] |= INTR_BS;
        esp_raise_irq(s);
    }
}

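/*
 * Main DMA transfer engine: dispatch on the current SCSI bus phase and move
 * data between guest memory (or the FIFO when using PDMA) and either the
 * cmdfifo or the pending SCSI request until TC runs out.
 */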
static void esp_do_dma(ESPState *s)
{
    uint32_t len, cmdlen;
    uint8_t buf[ESP_CMDFIFO_SZ];

    len = esp_get_tc(s);

    switch (esp_get_phase(s)) {
    case STAT_MO:
        if (s->dma_memory_read) {
            len = MIN(len, fifo8_num_free(&s->cmdfifo));
            s->dma_memory_read(s->dma_opaque, buf, len);
            esp_set_tc(s, esp_get_tc(s) - len);
        } else {
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
        }

        fifo8_push_all(&s->cmdfifo, buf, len);
        s->cmdfifo_cdb_offset += len;

        switch (s->rregs[ESP_CMD]) {
        case CMD_SELATN | CMD_DMA:
            if (fifo8_num_used(&s->cmdfifo) >= 1) {
                /* First byte received, switch to command phase */
                esp_set_phase(s, STAT_CD);
                s->rregs[ESP_RSEQ] = SEQ_CD;
                s->cmdfifo_cdb_offset = 1;

                if (fifo8_num_used(&s->cmdfifo) > 1) {
                    /* Process any additional command phase data */
                    esp_do_dma(s);
                }
            }
            break;

        case CMD_SELATNS | CMD_DMA:
            if (fifo8_num_used(&s->cmdfifo) == 1) {
                /* First byte received, stop in message out phase */
                s->rregs[ESP_RSEQ] = SEQ_MO;
                s->cmdfifo_cdb_offset = 1;

                /* Raise command completion interrupt */
                s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
                esp_raise_irq(s);
            }
            break;

        case CMD_TI | CMD_DMA:
            /* ATN remains asserted until TC == 0 */
            if (esp_get_tc(s) == 0) {
                esp_set_phase(s, STAT_CD);
                s->rregs[ESP_CMD] = 0;
                s->rregs[ESP_RINTR] |= INTR_BS;
                esp_raise_irq(s);
            }
            break;
        }
        break;

    case STAT_CD:
        cmdlen = fifo8_num_used(&s->cmdfifo);
        trace_esp_do_dma(cmdlen, len);
        if (s->dma_memory_read) {
            len = MIN(len, fifo8_num_free(&s->cmdfifo));
            s->dma_memory_read(s->dma_opaque, buf, len);
            fifo8_push_all(&s->cmdfifo, buf, len);
            esp_set_tc(s, esp_get_tc(s) - len);
        } else {
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);
        }
        trace_esp_handle_ti_cmd(cmdlen);
        s->ti_size = 0;
        if (esp_get_tc(s) == 0) {
            /* Command has been received */
            do_cmd(s);
        }
        break;

    case STAT_DO:
        if (!s->current_req) {
            return;
        }
        if (s->async_len == 0 && esp_get_tc(s)) {
            /* Defer until data is available. */
            return;
        }
        if (len > s->async_len) {
            len = s->async_len;
        }

        switch (s->rregs[ESP_CMD]) {
        case CMD_TI | CMD_DMA:
            if (s->dma_memory_read) {
                s->dma_memory_read(s->dma_opaque, s->async_buf, len);
                esp_set_tc(s, esp_get_tc(s) - len);
            } else {
                /* Copy FIFO data to device */
                len = MIN(s->async_len, ESP_FIFO_SZ);
                len = MIN(len, fifo8_num_used(&s->fifo));
                len = esp_fifo_pop_buf(s, s->async_buf, len);
            }

            s->async_buf += len;
            s->async_len -= len;
            s->ti_size += len;
            break;

        case CMD_PAD | CMD_DMA:
            /* Copy TC zero bytes into the incoming stream */
            if (!s->dma_memory_read) {
                len = MIN(s->async_len, ESP_FIFO_SZ);
                len = MIN(len, fifo8_num_free(&s->fifo));
            }

            memset(s->async_buf, 0, len);

            s->async_buf += len;
            s->async_len -= len;
            s->ti_size += len;
            break;
        }

        if (s->async_len == 0 && fifo8_num_used(&s->fifo) < 2) {
            /* Defer until the scsi layer has completed */
            scsi_req_continue(s->current_req);
            return;
        }

        esp_dma_ti_check(s);
        break;

    case STAT_DI:
        if (!s->current_req) {
            return;
        }
        if (s->async_len == 0 && esp_get_tc(s)) {
            /* Defer until data is available. */
            return;
        }
        if (len > s->async_len) {
            len = s->async_len;
        }

        switch (s->rregs[ESP_CMD]) {
        case CMD_TI | CMD_DMA:
            if (s->dma_memory_write) {
                s->dma_memory_write(s->dma_opaque, s->async_buf, len);
            } else {
                /* Copy device data to FIFO */
                len = MIN(len, fifo8_num_free(&s->fifo));
                esp_fifo_push_buf(s, s->async_buf, len);
            }

            s->async_buf += len;
            s->async_len -= len;
            s->ti_size -= len;
            esp_set_tc(s, esp_get_tc(s) - len);
            break;

        case CMD_PAD | CMD_DMA:
            /* Drop TC bytes from the incoming stream */
            if (!s->dma_memory_write) {
                len = MIN(len, fifo8_num_free(&s->fifo));
            }

            s->async_buf += len;
            s->async_len -= len;
            s->ti_size -= len;
            esp_set_tc(s, esp_get_tc(s) - len);
            break;
        }

        if (s->async_len == 0 && s->ti_size == 0 && esp_get_tc(s)) {
            /* If the guest underflows TC then terminate SCSI request */
            scsi_req_continue(s->current_req);
            return;
        }

        if (s->async_len == 0 && fifo8_num_used(&s->fifo) < 2) {
            /* Defer until the scsi layer has completed */
            scsi_req_continue(s->current_req);
            return;
        }

        esp_dma_ti_check(s);
        break;

    case STAT_ST:
        switch (s->rregs[ESP_CMD]) {
        case CMD_ICCS | CMD_DMA:
            len = MIN(len, 1);

            if (len) {
                buf[0] = s->status;

                if (s->dma_memory_write) {
                    s->dma_memory_write(s->dma_opaque, buf, len);
                } else {
                    esp_fifo_push_buf(s, buf, len);
                }

                esp_set_tc(s, esp_get_tc(s) - len);
                esp_set_phase(s, STAT_MI);

                if (esp_get_tc(s) > 0) {
                    /* Process any message in phase data */
                    esp_do_dma(s);
                }
            }
            break;

        default:
            /* Consume remaining data if the guest underflows TC */
            if (fifo8_num_used(&s->fifo) < 2) {
                s->rregs[ESP_RINTR] |= INTR_BS;
                esp_raise_irq(s);
            }
            break;
        }
        break;

    case STAT_MI:
        switch (s->rregs[ESP_CMD]) {
        case CMD_ICCS | CMD_DMA:
            len = MIN(len, 1);

            if (len) {
                buf[0] = 0;

                if (s->dma_memory_write) {
                    s->dma_memory_write(s->dma_opaque, buf, len);
                } else {
                    esp_fifo_push_buf(s, buf, len);
                }

                esp_set_tc(s, esp_get_tc(s) - len);

                /* Raise end of command interrupt */
                s->rregs[ESP_RINTR] |= INTR_FC;
                esp_raise_irq(s);
            }
            break;
        }
        break;
    }
}

static void esp_nodma_ti_dataout(ESPState *s)
{
    int len;

    if (!s->current_req) {
        return;
    }
    if (s->async_len == 0) {
        /* Defer until data is available. */
        return;
    }
    len = MIN(s->async_len, ESP_FIFO_SZ);
    len = MIN(len, fifo8_num_used(&s->fifo));
    esp_fifo_pop_buf(s, s->async_buf, len);
    s->async_buf += len;
    s->async_len -= len;
    s->ti_size += len;

    if (s->async_len == 0) {
        scsi_req_continue(s->current_req);
        return;
    }

    s->rregs[ESP_RINTR] |= INTR_BS;
    esp_raise_irq(s);
}

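/*
 * Non-DMA counterpart of esp_do_dma(): transfers are driven entirely by
 * guest reads and writes of the FIFO register, so each step moves at most
 * a FIFO's worth of data before raising an interrupt.
 */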
static void esp_do_nodma(ESPState *s)
{
    uint8_t buf[ESP_FIFO_SZ];
    uint32_t cmdlen;
    int len;

    switch (esp_get_phase(s)) {
    case STAT_MO:
        switch (s->rregs[ESP_CMD]) {
        case CMD_SELATN:
            /* Copy FIFO into cmdfifo */
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            if (fifo8_num_used(&s->cmdfifo) >= 1) {
                /* First byte received, switch to command phase */
                esp_set_phase(s, STAT_CD);
                s->rregs[ESP_RSEQ] = SEQ_CD;
                s->cmdfifo_cdb_offset = 1;

                if (fifo8_num_used(&s->cmdfifo) > 1) {
                    /* Process any additional command phase data */
                    esp_do_nodma(s);
                }
            }
            break;

        case CMD_SELATNS:
            /* Copy one byte from FIFO into cmdfifo */
            len = esp_fifo_pop_buf(s, buf,
                                   MIN(fifo8_num_used(&s->fifo), 1));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            if (fifo8_num_used(&s->cmdfifo) >= 1) {
                /* First byte received, stop in message out phase */
                s->rregs[ESP_RSEQ] = SEQ_MO;
                s->cmdfifo_cdb_offset = 1;

                /* Raise command completion interrupt */
                s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
                esp_raise_irq(s);
            }
            break;

        case CMD_TI:
            /* Copy FIFO into cmdfifo */
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            /* ATN remains asserted until FIFO empty */
            s->cmdfifo_cdb_offset = fifo8_num_used(&s->cmdfifo);
            esp_set_phase(s, STAT_CD);
            s->rregs[ESP_CMD] = 0;
            s->rregs[ESP_RINTR] |= INTR_BS;
            esp_raise_irq(s);
            break;
        }
        break;

    case STAT_CD:
        switch (s->rregs[ESP_CMD]) {
        case CMD_TI:
            /* Copy FIFO into cmdfifo */
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            cmdlen = fifo8_num_used(&s->cmdfifo);
            trace_esp_handle_ti_cmd(cmdlen);

            /* CDB may be transferred in one or more TI commands */
            if (esp_cdb_ready(s)) {
                /* Command has been received */
                do_cmd(s);
            } else {
                /*
                 * If data was transferred from the FIFO then raise bus
                 * service interrupt to indicate transfer complete. Otherwise
                 * defer until the next FIFO write.
                 */
                if (len) {
                    /* Raise interrupt to indicate transfer complete */
                    s->rregs[ESP_RINTR] |= INTR_BS;
                    esp_raise_irq(s);
                }
            }
            break;

        case CMD_SEL | CMD_DMA:
        case CMD_SELATN | CMD_DMA:
            /* Copy FIFO into cmdfifo */
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            /* Handle when DMA transfer is terminated by non-DMA FIFO write */
            if (esp_cdb_ready(s)) {
                /* Command has been received */
                do_cmd(s);
            }
            break;

        case CMD_SEL:
        case CMD_SELATN:
            /* FIFO already contains entire CDB: copy to cmdfifo and execute */
            len = esp_fifo_pop_buf(s, buf, fifo8_num_used(&s->fifo));
            len = MIN(fifo8_num_free(&s->cmdfifo), len);
            fifo8_push_all(&s->cmdfifo, buf, len);

            do_cmd(s);
            break;
        }
        break;

    case STAT_DO:
        /* Accumulate data in FIFO until non-DMA TI is executed */
        break;

    case STAT_DI:
        if (!s->current_req) {
            return;
        }
        if (s->async_len == 0) {
            /* Defer until data is available. */
            return;
        }
        if (fifo8_is_empty(&s->fifo)) {
            esp_fifo_push(s, s->async_buf[0]);
            s->async_buf++;
            s->async_len--;
            s->ti_size--;
        }

        if (s->async_len == 0) {
            scsi_req_continue(s->current_req);
            return;
        }

        /* If preloading the FIFO, defer until TI command issued */
        if (s->rregs[ESP_CMD] != CMD_TI) {
            return;
        }

        s->rregs[ESP_RINTR] |= INTR_BS;
        esp_raise_irq(s);
        break;

    case STAT_ST:
        switch (s->rregs[ESP_CMD]) {
        case CMD_ICCS:
            esp_fifo_push(s, s->status);
            esp_set_phase(s, STAT_MI);

            /* Process any message in phase data */
            esp_do_nodma(s);
            break;
        }
        break;

    case STAT_MI:
        switch (s->rregs[ESP_CMD]) {
        case CMD_ICCS:
            esp_fifo_push(s, 0);

            /* Raise end of command interrupt */
            s->rregs[ESP_RINTR] |= INTR_FC;
            esp_raise_irq(s);
            break;
        }
        break;
    }
}

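/*
 * Called by the QEMU SCSI layer (via esp_scsi_info.complete) once a request
 * has finished: latch the SCSI status byte and switch to STATUS phase.
 */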
void esp_command_complete(SCSIRequest *req, size_t resid)
{
    ESPState *s = req->hba_private;
    int to_device = (esp_get_phase(s) == STAT_DO);

    trace_esp_command_complete();

    /*
     * Non-DMA transfers from the target will leave the last byte in
     * the FIFO so don't reset ti_size in this case
     */
    if (s->dma || to_device) {
        if (s->ti_size != 0) {
            trace_esp_command_complete_unexpected();
        }
    }

    s->async_len = 0;
    if (req->status) {
        trace_esp_command_complete_fail();
    }
    s->status = req->status;

    /*
     * Switch to status phase. For non-DMA transfers from the target the last
     * byte is still in the FIFO
     */
    s->ti_size = 0;

    switch (s->rregs[ESP_CMD]) {
    case CMD_SEL | CMD_DMA:
    case CMD_SEL:
    case CMD_SELATN | CMD_DMA:
    case CMD_SELATN:
        /*
         * No data phase for sequencer command so raise deferred bus service
         * and function complete interrupt
         */
        s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        break;

    case CMD_TI | CMD_DMA:
    case CMD_TI:
        s->rregs[ESP_CMD] = 0;
        break;
    }

    /* Raise bus service interrupt to indicate change to STATUS phase */
    esp_set_phase(s, STAT_ST);
    s->rregs[ESP_RINTR] |= INTR_BS;
    esp_raise_irq(s);

    if (s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
    }
}

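/*
 * Called by the QEMU SCSI layer (via esp_scsi_info.transfer_data) whenever
 * more request data becomes available in the async buffer.
 */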
void esp_transfer_data(SCSIRequest *req, uint32_t len)
{
    ESPState *s = req->hba_private;
    uint32_t dmalen = esp_get_tc(s);

    trace_esp_transfer_data(dmalen, s->ti_size);
    s->async_len = len;
    s->async_buf = scsi_req_get_buf(req);

    if (!s->data_ready) {
        s->data_ready = true;

        switch (s->rregs[ESP_CMD]) {
        case CMD_SEL | CMD_DMA:
        case CMD_SEL:
        case CMD_SELATN | CMD_DMA:
        case CMD_SELATN:
            /*
             * Initial incoming data xfer is complete for sequencer command
             * so raise deferred bus service and function complete interrupt
             */
            s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
            s->rregs[ESP_RSEQ] = SEQ_CD;
            break;

        case CMD_SELATNS | CMD_DMA:
        case CMD_SELATNS:
            /*
             * Initial incoming data xfer is complete so raise command
             * completion interrupt
             */
            s->rregs[ESP_RINTR] |= INTR_BS;
            s->rregs[ESP_RSEQ] = SEQ_MO;
            break;

        case CMD_TI | CMD_DMA:
        case CMD_TI:
            /*
             * Bus service interrupt raised because of initial change to
             * DATA phase
             */
            s->rregs[ESP_CMD] = 0;
            s->rregs[ESP_RINTR] |= INTR_BS;
            break;
        }

        esp_raise_irq(s);
    }

    /*
     * Always perform the initial transfer upon reception of the next TI
     * command to ensure the DMA/non-DMA status of the command is correct.
     * It is not possible to use s->dma directly in the section below as
     * some OSs send non-DMA NOP commands after a DMA transfer. Hence if the
     * async data transfer is delayed then s->dma is set incorrectly.
     */

    if (s->rregs[ESP_CMD] == (CMD_TI | CMD_DMA)) {
        /* When the SCSI layer returns more data, raise deferred INTR_BS */
        esp_dma_ti_check(s);

        esp_do_dma(s);
    } else if (s->rregs[ESP_CMD] == CMD_TI) {
        esp_do_nodma(s);
    }
}

static void handle_ti(ESPState *s)
{
    uint32_t dmalen;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_ti;
        return;
    }

    if (s->dma) {
        dmalen = esp_get_tc(s);
        trace_esp_handle_ti(dmalen);
        esp_do_dma(s);
    } else {
        trace_esp_handle_ti(s->ti_size);
        esp_do_nodma(s);

        if (esp_get_phase(s) == STAT_DO) {
            esp_nodma_ti_dataout(s);
        }
    }
}

void esp_hard_reset(ESPState *s)
{
    memset(s->rregs, 0, ESP_REGS);
    memset(s->wregs, 0, ESP_REGS);
    s->tchi_written = 0;
    s->ti_size = 0;
    s->async_len = 0;
    fifo8_reset(&s->fifo);
    fifo8_reset(&s->cmdfifo);
    s->dma = 0;
    s->dma_cb = NULL;

    s->rregs[ESP_CFG1] = 7;
}

static void esp_soft_reset(ESPState *s)
{
    qemu_irq_lower(s->irq);
    qemu_irq_lower(s->drq_irq);
    esp_hard_reset(s);
}

static void esp_bus_reset(ESPState *s)
{
    bus_cold_reset(BUS(&s->bus));
}

static void parent_esp_reset(ESPState *s, int irq, int level)
{
    if (level) {
        esp_soft_reset(s);
    }
}

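/*
 * Decode a command written to ESP_CMD. For DMA commands the live transfer
 * counter is reloaded from the start counter (STC), with a programmed value
 * of zero meaning the maximum of 0x10000 bytes.
 */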
static void esp_run_cmd(ESPState *s)
{
    uint8_t cmd = s->rregs[ESP_CMD];

    if (cmd & CMD_DMA) {
        s->dma = 1;
        /* Reload DMA counter. */
        if (esp_get_stc(s) == 0) {
            esp_set_tc(s, 0x10000);
        } else {
            esp_set_tc(s, esp_get_stc(s));
        }
    } else {
        s->dma = 0;
    }
    switch (cmd & CMD_CMD) {
    case CMD_NOP:
        trace_esp_mem_writeb_cmd_nop(cmd);
        break;
    case CMD_FLUSH:
        trace_esp_mem_writeb_cmd_flush(cmd);
        fifo8_reset(&s->fifo);
        break;
    case CMD_RESET:
        trace_esp_mem_writeb_cmd_reset(cmd);
        esp_soft_reset(s);
        break;
    case CMD_BUSRESET:
        trace_esp_mem_writeb_cmd_bus_reset(cmd);
        esp_bus_reset(s);
        if (!(s->wregs[ESP_CFG1] & CFG1_RESREPT)) {
            s->rregs[ESP_RINTR] |= INTR_RST;
            esp_raise_irq(s);
        }
        break;
    case CMD_TI:
        trace_esp_mem_writeb_cmd_ti(cmd);
        handle_ti(s);
        break;
    case CMD_ICCS:
        trace_esp_mem_writeb_cmd_iccs(cmd);
        write_response(s);
        break;
    case CMD_MSGACC:
        trace_esp_mem_writeb_cmd_msgacc(cmd);
        s->rregs[ESP_RINTR] |= INTR_DC;
        s->rregs[ESP_RSEQ] = 0;
        s->rregs[ESP_RFLAGS] = 0;
        esp_raise_irq(s);
        break;
    case CMD_PAD:
        trace_esp_mem_writeb_cmd_pad(cmd);
        handle_pad(s);
        break;
    case CMD_SATN:
        trace_esp_mem_writeb_cmd_satn(cmd);
        break;
    case CMD_RSTATN:
        trace_esp_mem_writeb_cmd_rstatn(cmd);
        break;
    case CMD_SEL:
        trace_esp_mem_writeb_cmd_sel(cmd);
        handle_s_without_atn(s);
        break;
    case CMD_SELATN:
        trace_esp_mem_writeb_cmd_selatn(cmd);
        handle_satn(s);
        break;
    case CMD_SELATNS:
        trace_esp_mem_writeb_cmd_selatns(cmd);
        handle_satn_stop(s);
        break;
    case CMD_ENSEL:
        trace_esp_mem_writeb_cmd_ensel(cmd);
        s->rregs[ESP_RINTR] = 0;
        break;
    case CMD_DISSEL:
        trace_esp_mem_writeb_cmd_dissel(cmd);
        s->rregs[ESP_RINTR] = 0;
        esp_raise_irq(s);
        break;
    default:
        trace_esp_error_unhandled_command(cmd);
        break;
    }
}

uint64_t esp_reg_read(ESPState *s, uint32_t saddr)
{
    uint32_t val;

    switch (saddr) {
    case ESP_FIFO:
        s->rregs[ESP_FIFO] = esp_fifo_pop(s);
        val = s->rregs[ESP_FIFO];
        break;
    case ESP_RINTR:
        /*
         * Clear sequence step, interrupt register and all status bits
         * except TC
         */
        val = s->rregs[ESP_RINTR];
        s->rregs[ESP_RINTR] = 0;
        esp_lower_irq(s);
        s->rregs[ESP_RSTAT] &= STAT_TC | 7;
        /*
         * According to the datasheet ESP_RSEQ should be cleared, but as the
         * emulation currently defers information transfers to the next TI
         * command leave it for now so that pedantic guests such as the old
         * Linux 2.6 driver see the correct flags before the next SCSI phase
         * transition.
         *
         * s->rregs[ESP_RSEQ] = SEQ_0;
         */
        break;
    case ESP_TCHI:
        /* Return the unique id if the value has never been written */
        if (!s->tchi_written) {
            val = s->chip_id;
        } else {
            val = s->rregs[saddr];
        }
        break;
    case ESP_RFLAGS:
        /* Bottom 5 bits indicate number of bytes in FIFO */
        val = fifo8_num_used(&s->fifo);
        break;
    default:
        val = s->rregs[saddr];
        break;
    }

    trace_esp_mem_readb(saddr, val);
    return val;
}

void esp_reg_write(ESPState *s, uint32_t saddr, uint64_t val)
{
    trace_esp_mem_writeb(saddr, s->wregs[saddr], val);
    switch (saddr) {
    case ESP_TCHI:
        s->tchi_written = true;
        /* fall through */
    case ESP_TCLO:
    case ESP_TCMID:
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        break;
    case ESP_FIFO:
        if (!fifo8_is_full(&s->fifo)) {
            esp_fifo_push(s, val);
        }
        esp_do_nodma(s);
        break;
    case ESP_CMD:
        s->rregs[saddr] = val;
        esp_run_cmd(s);
        break;
    case ESP_WBUSID ... ESP_WSYNO:
        break;
    case ESP_CFG1:
    case ESP_CFG2: case ESP_CFG3:
    case ESP_RES3: case ESP_RES4:
        s->rregs[saddr] = val;
        break;
    case ESP_WCCF ... ESP_WTEST:
        break;
    default:
        trace_esp_error_invalid_write(val, saddr);
        return;
    }
    s->wregs[saddr] = val;
}

static bool esp_mem_accepts(void *opaque, hwaddr addr,
                            unsigned size, bool is_write,
                            MemTxAttrs attrs)
{
    return (size == 1) || (is_write && size == 4);
}

static bool esp_is_before_version_5(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);

    version_id = MIN(version_id, s->mig_version_id);
    return version_id < 5;
}

static bool esp_is_version_5(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);

    version_id = MIN(version_id, s->mig_version_id);
    return version_id >= 5;
}

static bool esp_is_version_6(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);

    version_id = MIN(version_id, s->mig_version_id);
    return version_id >= 6;
}

static bool esp_is_between_version_5_and_6(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);

    version_id = MIN(version_id, s->mig_version_id);
    return version_id >= 5 && version_id <= 6;
}

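/*
 * The effective migration stream version is capped at the version recorded
 * by the source in mig_version_id (set in esp_pre_save) so that the field
 * tests above behave consistently on both ends of a migration.
 */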
int esp_pre_save(void *opaque)
{
    ESPState *s = ESP(object_resolve_path_component(
                      OBJECT(opaque), "esp"));

    s->mig_version_id = vmstate_esp.version_id;
    return 0;
}

static int esp_post_load(void *opaque, int version_id)
{
    ESPState *s = ESP(opaque);
    int len, i;

    version_id = MIN(version_id, s->mig_version_id);

    if (version_id < 5) {
        esp_set_tc(s, s->mig_dma_left);

        /* Migrate ti_buf to fifo */
        len = s->mig_ti_wptr - s->mig_ti_rptr;
        for (i = 0; i < len; i++) {
            fifo8_push(&s->fifo, s->mig_ti_buf[i]);
        }

        /* Migrate cmdbuf to cmdfifo */
        for (i = 0; i < s->mig_cmdlen; i++) {
            fifo8_push(&s->cmdfifo, s->mig_cmdbuf[i]);
        }
    }

    s->mig_version_id = vmstate_esp.version_id;
    return 0;
}

const VMStateDescription vmstate_esp = {
    .name = "esp",
    .version_id = 7,
    .minimum_version_id = 3,
    .post_load = esp_post_load,
    .fields = (const VMStateField[]) {
        VMSTATE_BUFFER(rregs, ESPState),
        VMSTATE_BUFFER(wregs, ESPState),
        VMSTATE_INT32(ti_size, ESPState),
        VMSTATE_UINT32_TEST(mig_ti_rptr, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32_TEST(mig_ti_wptr, ESPState, esp_is_before_version_5),
        VMSTATE_BUFFER_TEST(mig_ti_buf, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32(status, ESPState),
        VMSTATE_UINT32_TEST(mig_deferred_status, ESPState,
                            esp_is_before_version_5),
        VMSTATE_BOOL_TEST(mig_deferred_complete, ESPState,
                          esp_is_before_version_5),
        VMSTATE_UINT32(dma, ESPState),
        VMSTATE_STATIC_BUFFER(mig_cmdbuf, ESPState, 0,
                              esp_is_before_version_5, 0, 16),
        VMSTATE_STATIC_BUFFER(mig_cmdbuf, ESPState, 4,
                              esp_is_before_version_5, 16,
                              sizeof(typeof_field(ESPState, mig_cmdbuf))),
        VMSTATE_UINT32_TEST(mig_cmdlen, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32(do_cmd, ESPState),
        VMSTATE_UINT32_TEST(mig_dma_left, ESPState, esp_is_before_version_5),
        VMSTATE_BOOL_TEST(data_ready, ESPState, esp_is_version_5),
        VMSTATE_UINT8_TEST(cmdfifo_cdb_offset, ESPState, esp_is_version_5),
        VMSTATE_FIFO8_TEST(fifo, ESPState, esp_is_version_5),
        VMSTATE_FIFO8_TEST(cmdfifo, ESPState, esp_is_version_5),
        VMSTATE_UINT8_TEST(mig_ti_cmd, ESPState,
                           esp_is_between_version_5_and_6),
        VMSTATE_UINT8_TEST(lun, ESPState, esp_is_version_6),
        VMSTATE_BOOL(drq_state, ESPState),
        VMSTATE_END_OF_LIST()
    },
};

static void sysbus_esp_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);
    uint32_t saddr;

    saddr = addr >> sysbus->it_shift;
    esp_reg_write(s, saddr, val);
}

static uint64_t sysbus_esp_mem_read(void *opaque, hwaddr addr,
                                    unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);
    uint32_t saddr;

    saddr = addr >> sysbus->it_shift;
    return esp_reg_read(s, saddr);
}

static const MemoryRegionOps sysbus_esp_mem_ops = {
    .read = sysbus_esp_mem_read,
    .write = sysbus_esp_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.accepts = esp_mem_accepts,
};

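/*
 * 16-bit accesses to the PDMA region are split into two byte-wide FIFO
 * transfers, high byte first; esp_do_dma() then processes whatever data
 * has accumulated.
 */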
static void sysbus_esp_pdma_write(void *opaque, hwaddr addr,
                                  uint64_t val, unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);

    trace_esp_pdma_write(size);

    switch (size) {
    case 1:
        esp_pdma_write(s, val);
        break;
    case 2:
        esp_pdma_write(s, val >> 8);
        esp_pdma_write(s, val);
        break;
    }
    esp_do_dma(s);
}

static uint64_t sysbus_esp_pdma_read(void *opaque, hwaddr addr,
                                     unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = ESP(&sysbus->esp);
    uint64_t val = 0;

    trace_esp_pdma_read(size);

    switch (size) {
    case 1:
        val = esp_pdma_read(s);
        break;
    case 2:
        val = esp_pdma_read(s);
        val = (val << 8) | esp_pdma_read(s);
        break;
    }
    esp_do_dma(s);
    return val;
}

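/*
 * Migration load_request hook: re-attach the in-flight SCSIRequest restored
 * by the SCSI layer to this HBA as the current request.
 */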
static void *esp_load_request(QEMUFile *f, SCSIRequest *req)
{
    ESPState *s = container_of(req->bus, ESPState, bus);

    scsi_req_ref(req);
    s->current_req = req;
    return s;
}

static const MemoryRegionOps sysbus_esp_pdma_ops = {
    .read = sysbus_esp_pdma_read,
    .write = sysbus_esp_pdma_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.min_access_size = 1,
    .valid.max_access_size = 4,
    .impl.min_access_size = 1,
    .impl.max_access_size = 2,
};

static const struct SCSIBusInfo esp_scsi_info = {
    .tcq = false,
    .max_target = ESP_MAX_DEVS,
    .max_lun = 7,

    .load_request = esp_load_request,
    .transfer_data = esp_transfer_data,
    .complete = esp_command_complete,
    .cancel = esp_request_cancelled
};

static void sysbus_esp_gpio_demux(void *opaque, int irq, int level)
{
    SysBusESPState *sysbus = SYSBUS_ESP(opaque);
    ESPState *s = ESP(&sysbus->esp);

    switch (irq) {
    case 0:
        parent_esp_reset(s, irq, level);
        break;
    case 1:
        esp_dma_enable(s, irq, level);
        break;
    }
}

static void sysbus_esp_realize(DeviceState *dev, Error **errp)
{
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
    SysBusESPState *sysbus = SYSBUS_ESP(dev);
    ESPState *s = ESP(&sysbus->esp);

    if (!qdev_realize(DEVICE(s), NULL, errp)) {
        return;
    }

    sysbus_init_irq(sbd, &s->irq);
    sysbus_init_irq(sbd, &s->drq_irq);
    assert(sysbus->it_shift != -1);

    s->chip_id = TCHI_FAS100A;
    memory_region_init_io(&sysbus->iomem, OBJECT(sysbus), &sysbus_esp_mem_ops,
                          sysbus, "esp-regs", ESP_REGS << sysbus->it_shift);
    sysbus_init_mmio(sbd, &sysbus->iomem);
    memory_region_init_io(&sysbus->pdma, OBJECT(sysbus), &sysbus_esp_pdma_ops,
                          sysbus, "esp-pdma", 4);
    sysbus_init_mmio(sbd, &sysbus->pdma);

    qdev_init_gpio_in(dev, sysbus_esp_gpio_demux, 2);

    scsi_bus_init(&s->bus, sizeof(s->bus), dev, &esp_scsi_info);
}

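/*
 * A board model wires the device up along these lines (a sketch only:
 * "base" and "irq" are placeholders, and real users such as the sparc32
 * DMA controller also connect the DRQ line and map the PDMA region):
 *
 *   DeviceState *dev = qdev_new(TYPE_SYSBUS_ESP);
 *   SYSBUS_ESP(dev)->it_shift = 2;                  // register stride of 4
 *   sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
 *   sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base);  // "esp-regs" region
 *   sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, irq);
 */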
static void sysbus_esp_hard_reset(DeviceState *dev)
{
    SysBusESPState *sysbus = SYSBUS_ESP(dev);
    ESPState *s = ESP(&sysbus->esp);

    esp_hard_reset(s);
}

static void sysbus_esp_init(Object *obj)
{
    SysBusESPState *sysbus = SYSBUS_ESP(obj);

    object_initialize_child(obj, "esp", &sysbus->esp, TYPE_ESP);
}

static const VMStateDescription vmstate_sysbus_esp_scsi = {
    .name = "sysbusespscsi",
    .version_id = 2,
    .minimum_version_id = 1,
    .pre_save = esp_pre_save,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT8_V(esp.mig_version_id, SysBusESPState, 2),
        VMSTATE_STRUCT(esp, SysBusESPState, 0, vmstate_esp, ESPState),
        VMSTATE_END_OF_LIST()
    }
};

static void sysbus_esp_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = sysbus_esp_realize;
    device_class_set_legacy_reset(dc, sysbus_esp_hard_reset);
    dc->vmsd = &vmstate_sysbus_esp_scsi;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}

static void esp_finalize(Object *obj)
{
    ESPState *s = ESP(obj);

    fifo8_destroy(&s->fifo);
    fifo8_destroy(&s->cmdfifo);
}

static void esp_init(Object *obj)
{
    ESPState *s = ESP(obj);

    fifo8_create(&s->fifo, ESP_FIFO_SZ);
    fifo8_create(&s->cmdfifo, ESP_CMDFIFO_SZ);
}

static void esp_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    /* internal device for sysbusesp/pciespscsi, not user-creatable */
    dc->user_creatable = false;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}

static const TypeInfo esp_info_types[] = {
    {
        .name = TYPE_SYSBUS_ESP,
        .parent = TYPE_SYS_BUS_DEVICE,
        .instance_init = sysbus_esp_init,
        .instance_size = sizeof(SysBusESPState),
        .class_init = sysbus_esp_class_init,
    },
    {
        .name = TYPE_ESP,
        .parent = TYPE_DEVICE,
        .instance_init = esp_init,
        .instance_finalize = esp_finalize,
        .instance_size = sizeof(ESPState),
        .class_init = esp_class_init,
    },
};

DEFINE_TYPES(esp_info_types)