/*
 * QEMU PowerMac CUDA device support
 *
 * Copyright (c) 2004-2007 Fabrice Bellard
 * Copyright (c) 2007 Jocelyn Mayer
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "hw/hw.h"
#include "hw/ppc/mac.h"
#include "hw/input/adb.h"
#include "qemu/timer.h"
#include "sysemu/sysemu.h"
#include "qemu/cutils.h"
#include "qemu/log.h"

/* XXX: implement all timer modes */

/* debug CUDA */
//#define DEBUG_CUDA

/* debug CUDA packets */
//#define DEBUG_CUDA_PACKET

#ifdef DEBUG_CUDA
#define CUDA_DPRINTF(fmt, ...)                                  \
    do { printf("CUDA: " fmt , ## __VA_ARGS__); } while (0)
#else
#define CUDA_DPRINTF(fmt, ...)
#endif

/* Bits in B data register: all active low */
#define TREQ            0x08    /* Transfer request (input) */
#define TACK            0x10    /* Transfer acknowledge (output) */
#define TIP             0x20    /* Transfer in progress (output) */

/* Bits in ACR */
#define SR_CTRL         0x1c    /* Shift register control bits */
#define SR_EXT          0x0c    /* Shift on external clock */
#define SR_OUT          0x10    /* Shift out if 1 */

/* Bits in IFR and IER */
#define IER_SET         0x80    /* set bits in IER */
#define IER_CLR         0       /* clear bits in IER */
#define SR_INT          0x04    /* Shift register full/empty */
#define SR_DATA_INT     0x08
#define SR_CLOCK_INT    0x10
#define T1_INT          0x40    /* Timer 1 interrupt */
#define T2_INT          0x20    /* Timer 2 interrupt */

/* Bits in ACR */
#define T1MODE          0xc0    /* Timer 1 mode */
#define T1MODE_CONT     0x40    /* continuous interrupts */

/* commands (1st byte) */
#define ADB_PACKET      0
#define CUDA_PACKET     1
#define ERROR_PACKET    2
#define TIMER_PACKET    3
#define POWER_PACKET    4
#define MACIIC_PACKET   5
#define PMU_PACKET      6


/* CUDA commands (2nd byte) */
#define CUDA_WARM_START                0x0
#define CUDA_AUTOPOLL                  0x1
#define CUDA_GET_6805_ADDR             0x2
#define CUDA_GET_TIME                  0x3
#define CUDA_GET_PRAM                  0x7
#define CUDA_SET_6805_ADDR             0x8
#define CUDA_SET_TIME                  0x9
#define CUDA_POWERDOWN                 0xa
#define CUDA_POWERUP_TIME              0xb
#define CUDA_SET_PRAM                  0xc
#define CUDA_MS_RESET                  0xd
#define CUDA_SEND_DFAC                 0xe
#define CUDA_BATTERY_SWAP_SENSE        0x10
#define CUDA_RESET_SYSTEM              0x11
#define CUDA_SET_IPL                   0x12
#define CUDA_FILE_SERVER_FLAG          0x13
#define CUDA_SET_AUTO_RATE             0x14
#define CUDA_GET_AUTO_RATE             0x16
#define CUDA_SET_DEVICE_LIST           0x19
#define CUDA_GET_DEVICE_LIST           0x1a
#define CUDA_SET_ONE_SECOND_MODE       0x1b
#define CUDA_SET_POWER_MESSAGES        0x21
#define CUDA_GET_SET_IIC               0x22
#define CUDA_WAKEUP                    0x23
#define CUDA_TIMER_TICKLE              0x24
#define CUDA_COMBINED_FORMAT_IIC       0x25

#define CUDA_TIMER_FREQ (4700000 / 6)

/* CUDA returns time_t's offset from Jan 1, 1904, not 1970 */
#define RTC_OFFSET                     2082844800

/* CUDA registers */
#define CUDA_REG_B       0x00
#define CUDA_REG_A       0x01
#define CUDA_REG_DIRB    0x02
#define CUDA_REG_DIRA    0x03
#define CUDA_REG_T1CL    0x04
#define CUDA_REG_T1CH    0x05
#define CUDA_REG_T1LL    0x06
#define CUDA_REG_T1LH    0x07
#define CUDA_REG_T2CL    0x08
#define CUDA_REG_T2CH    0x09
#define CUDA_REG_SR      0x0a
#define CUDA_REG_ACR     0x0b
#define CUDA_REG_PCR     0x0c
#define CUDA_REG_IFR     0x0d
#define CUDA_REG_IER     0x0e
#define CUDA_REG_ANH     0x0f

static void cuda_update(CUDAState *s);
static void cuda_receive_packet_from_host(CUDAState *s,
                                          const uint8_t *data, int len);
static void cuda_timer_update(CUDAState *s, CUDATimer *ti,
                              int64_t current_time);

static void cuda_update_irq(CUDAState *s)
{
    if (s->ifr & s->ier & (SR_INT | T1_INT | T2_INT)) {
        qemu_irq_raise(s->irq);
    } else {
        qemu_irq_lower(s->irq);
    }
}

static uint64_t get_tb(uint64_t time, uint64_t freq)
{
    return muldiv64(time, freq, NANOSECONDS_PER_SECOND);
}

static unsigned int get_counter(CUDAState *s, CUDATimer *ti)
{
    int64_t d;
    unsigned int counter;
    uint64_t tb_diff;
    uint64_t current_time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);

    /* Reverse of the tb calculation algorithm that Mac OS X uses on bootup. */
    tb_diff = get_tb(current_time, ti->frequency) - ti->load_time;
    d = (tb_diff * 0xBF401675E5DULL) / (ti->frequency << 24);

    if (ti->index == 0) {
        /* the timer goes down from latch to -1 (period of latch + 2) */
        if (d <= (ti->counter_value + 1)) {
            counter = (ti->counter_value - d) & 0xffff;
        } else {
            counter = (d - (ti->counter_value + 1)) % (ti->latch + 2);
            counter = (ti->latch - counter) & 0xffff;
        }
    } else {
        counter = (ti->counter_value - d) & 0xffff;
    }
    return counter;
}

static void set_counter(CUDAState *s, CUDATimer *ti, unsigned int val)
{
    CUDA_DPRINTF("T%d.counter=%d\n", 1 + ti->index, val);
    ti->load_time = get_tb(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                           s->tb_frequency);
    ti->counter_value = val;
    cuda_timer_update(s, ti, ti->load_time);
}

static int64_t get_next_irq_time(CUDATimer *ti, int64_t current_time)
{
    int64_t d, next_time;
    unsigned int counter;

    /* current counter value */
    d = muldiv64(current_time - ti->load_time,
                 CUDA_TIMER_FREQ, NANOSECONDS_PER_SECOND);
    /* the timer goes down from latch to -1 (period of latch + 2) */
    if (d <= (ti->counter_value + 1)) {
        counter = (ti->counter_value - d) & 0xffff;
    } else {
        counter = (d - (ti->counter_value + 1)) % (ti->latch + 2);
        counter = (ti->latch - counter) & 0xffff;
    }

    /* Note: we consider the irq is raised on 0 */
    if (counter == 0xffff) {
        next_time = d + ti->latch + 1;
    } else if (counter == 0) {
        next_time = d + ti->latch + 2;
    } else {
        next_time = d + counter;
    }
    CUDA_DPRINTF("latch=%d counter=%" PRId64 " delta_next=%" PRId64 "\n",
                 ti->latch, d, next_time - d);
    next_time = muldiv64(next_time, NANOSECONDS_PER_SECOND, CUDA_TIMER_FREQ) +
                ti->load_time;
    if (next_time <= current_time) {
        next_time = current_time + 1;
    }
    return next_time;
}

static void cuda_timer_update(CUDAState *s, CUDATimer *ti,
                              int64_t current_time)
{
    if (!ti->timer)
        return;
    if (ti->index == 0 && (s->acr & T1MODE) != T1MODE_CONT) {
        timer_del(ti->timer);
    } else {
        ti->next_irq_time = get_next_irq_time(ti, current_time);
        timer_mod(ti->timer, ti->next_irq_time);
    }
}

static void cuda_timer1(void *opaque)
{
    CUDAState *s = opaque;
    CUDATimer *ti = &s->timers[0];

    cuda_timer_update(s, ti, ti->next_irq_time);
    s->ifr |= T1_INT;
    cuda_update_irq(s);
}

static void cuda_timer2(void *opaque)
{
    CUDAState *s = opaque;
    CUDATimer *ti = &s->timers[1];

    cuda_timer_update(s, ti, ti->next_irq_time);
    s->ifr |= T2_INT;
    cuda_update_irq(s);
}

static void cuda_set_sr_int(void *opaque)
{
    CUDAState *s = opaque;

    CUDA_DPRINTF("CUDA: %s:%d\n", __func__, __LINE__);
    s->ifr |= SR_INT;
    cuda_update_irq(s);
}

static void cuda_delay_set_sr_int(CUDAState *s)
{
    int64_t expire;

    if (s->dirb == 0xff) {
        /* Not in Mac OS, fire the IRQ directly */
        cuda_set_sr_int(s);
        return;
    }

    CUDA_DPRINTF("CUDA: %s:%d\n", __func__, __LINE__);

    expire = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 300 * SCALE_US;
    timer_mod(s->sr_delay_timer, expire);
}

static uint64_t cuda_read(void *opaque, hwaddr addr, unsigned size)
{
    CUDAState *s = opaque;
    uint32_t val;

    addr = (addr >> 9) & 0xf;
    switch(addr) {
    case CUDA_REG_B:
        val = s->b;
        break;
    case CUDA_REG_A:
        val = s->a;
        break;
    case CUDA_REG_DIRB:
        val = s->dirb;
        break;
    case CUDA_REG_DIRA:
        val = s->dira;
        break;
    case CUDA_REG_T1CL:
        val = get_counter(s, &s->timers[0]) & 0xff;
        s->ifr &= ~T1_INT;
        cuda_update_irq(s);
        break;
    case CUDA_REG_T1CH:
        val = get_counter(s, &s->timers[0]) >> 8;
        cuda_update_irq(s);
        break;
    case CUDA_REG_T1LL:
        val = s->timers[0].latch & 0xff;
        break;
    case CUDA_REG_T1LH:
        /* XXX: check this */
        val = (s->timers[0].latch >> 8) & 0xff;
        break;
    case CUDA_REG_T2CL:
        val = get_counter(s, &s->timers[1]) & 0xff;
        s->ifr &= ~T2_INT;
        cuda_update_irq(s);
        break;
    case CUDA_REG_T2CH:
        val = get_counter(s, &s->timers[1]) >> 8;
        break;
    case CUDA_REG_SR:
        val = s->sr;
        s->ifr &= ~(SR_INT | SR_CLOCK_INT | SR_DATA_INT);
        cuda_update_irq(s);
        break;
    case CUDA_REG_ACR:
        val = s->acr;
        break;
    case CUDA_REG_PCR:
        val = s->pcr;
        break;
    case CUDA_REG_IFR:
        val = s->ifr;
        if (s->ifr & s->ier) {
            val |= 0x80;
        }
        break;
    case CUDA_REG_IER:
        val = s->ier | 0x80;
        break;
    default:
    case CUDA_REG_ANH:
        val = s->anh;
        break;
    }
    if (addr != CUDA_REG_IFR || val != 0) {
        CUDA_DPRINTF("read: reg=0x%x val=%02x\n", (int)addr, val);
    }

    return val;
}

static void cuda_write(void *opaque, hwaddr addr, uint64_t val, unsigned size)
{
    CUDAState *s = opaque;

    addr = (addr >> 9) & 0xf;
    CUDA_DPRINTF("write: reg=0x%x val=%02x\n", (int)addr, val);

    switch(addr) {
    case CUDA_REG_B:
        s->b = (s->b & ~s->dirb) | (val & s->dirb);
        cuda_update(s);
        break;
    case CUDA_REG_A:
        s->a = (s->a & ~s->dira) | (val & s->dira);
        break;
    case CUDA_REG_DIRB:
        s->dirb = val;
        break;
    case CUDA_REG_DIRA:
        s->dira = val;
        break;
    case CUDA_REG_T1CL:
        s->timers[0].latch = (s->timers[0].latch & 0xff00) | val;
        cuda_timer_update(s, &s->timers[0],
                          qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
        break;
    case CUDA_REG_T1CH:
        s->timers[0].latch = (s->timers[0].latch & 0xff) | (val << 8);
        s->ifr &= ~T1_INT;
        set_counter(s, &s->timers[0], s->timers[0].latch);
        break;
    case CUDA_REG_T1LL:
        s->timers[0].latch = (s->timers[0].latch & 0xff00) | val;
        cuda_timer_update(s, &s->timers[0],
                          qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
        break;
    case CUDA_REG_T1LH:
        s->timers[0].latch = (s->timers[0].latch & 0xff) | (val << 8);
        s->ifr &= ~T1_INT;
        cuda_timer_update(s, &s->timers[0],
                          qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
        break;
    case CUDA_REG_T2CL:
        s->timers[1].latch = (s->timers[1].latch & 0xff00) | val;
        break;
    case CUDA_REG_T2CH:
        /* To ensure T2 generates an interrupt on zero crossing with the
           common timer code, write the value directly from the latch to
           the counter */
        s->timers[1].latch = (s->timers[1].latch & 0xff) | (val << 8);
        s->ifr &= ~T2_INT;
        set_counter(s, &s->timers[1], s->timers[1].latch);
        break;
    case CUDA_REG_SR:
        s->sr = val;
        break;
    case CUDA_REG_ACR:
        s->acr = val;
        cuda_timer_update(s, &s->timers[0],
                          qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
        cuda_update(s);
        break;
    case CUDA_REG_PCR:
        s->pcr = val;
        break;
    case CUDA_REG_IFR:
        /* reset bits */
        s->ifr &= ~val;
        cuda_update_irq(s);
        break;
    case CUDA_REG_IER:
        if (val & IER_SET) {
            /* set bits */
            s->ier |= val & 0x7f;
        } else {
            /* reset bits */
            s->ier &= ~val;
        }
        cuda_update_irq(s);
        break;
    default:
    case CUDA_REG_ANH:
        s->anh = val;
        break;
    }
}

/* NOTE: TIP and TREQ are negated */
static void cuda_update(CUDAState *s)
{
    int packet_received, len;

    packet_received = 0;
    if (!(s->b & TIP)) {
        /* transfer requested from host */

        if (s->acr & SR_OUT) {
            /* data output */
            if ((s->b & (TACK | TIP)) != (s->last_b & (TACK | TIP))) {
                if (s->data_out_index < sizeof(s->data_out)) {
                    CUDA_DPRINTF("send: %02x\n", s->sr);
                    s->data_out[s->data_out_index++] = s->sr;
                    cuda_delay_set_sr_int(s);
                }
            }
        } else {
            if (s->data_in_index < s->data_in_size) {
                /* data input */
                if ((s->b & (TACK | TIP)) != (s->last_b & (TACK | TIP))) {
                    s->sr = s->data_in[s->data_in_index++];
                    CUDA_DPRINTF("recv: %02x\n", s->sr);
                    /* indicate end of transfer */
                    if (s->data_in_index >= s->data_in_size) {
                        s->b = (s->b | TREQ);
                    }
                    cuda_delay_set_sr_int(s);
                }
            }
        }
    } else {
        /* no transfer requested: handle sync case */
        if ((s->last_b & TIP) && (s->b & TACK) != (s->last_b & TACK)) {
            /* update TREQ state each time TACK changes state */
            if (s->b & TACK)
                s->b = (s->b | TREQ);
            else
                s->b = (s->b & ~TREQ);
            cuda_delay_set_sr_int(s);
        } else {
            if (!(s->last_b & TIP)) {
                /* handle end of host to cuda transfer */
                packet_received = (s->data_out_index > 0);
                /* always an IRQ at the end of transfer */
                cuda_delay_set_sr_int(s);
            }
            /* signal if there is data to read */
            if (s->data_in_index < s->data_in_size) {
                s->b = (s->b & ~TREQ);
            }
        }
    }

    s->last_acr = s->acr;
    s->last_b = s->b;

    /* NOTE: cuda_receive_packet_from_host() can call cuda_update()
       recursively */
    if (packet_received) {
        len = s->data_out_index;
        s->data_out_index = 0;
        cuda_receive_packet_from_host(s, s->data_out, len);
    }
}

static void cuda_send_packet_to_host(CUDAState *s,
                                     const uint8_t *data, int len)
{
#ifdef DEBUG_CUDA_PACKET
    {
        int i;
        printf("cuda_send_packet_to_host:\n");
        for(i = 0; i < len; i++)
            printf(" %02x", data[i]);
        printf("\n");
    }
#endif
    memcpy(s->data_in, data, len);
    s->data_in_size = len;
    s->data_in_index = 0;
    cuda_update(s);
    cuda_delay_set_sr_int(s);
}

static void cuda_adb_poll(void *opaque)
{
    CUDAState *s = opaque;
    uint8_t obuf[ADB_MAX_OUT_LEN + 2];
    int olen;

    olen = adb_poll(&s->adb_bus, obuf + 2, s->adb_poll_mask);
    if (olen > 0) {
        obuf[0] = ADB_PACKET;
        obuf[1] = 0x40; /* polled data */
        cuda_send_packet_to_host(s, obuf, olen + 2);
    }
    timer_mod(s->adb_poll_timer,
              qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
              (NANOSECONDS_PER_SECOND / (1000 / s->autopoll_rate_ms)));
}

/* description of commands */
typedef struct CudaCommand {
    uint8_t command;
    const char *name;
    bool (*handler)(CUDAState *s,
                    const uint8_t *in_args, int in_len,
                    uint8_t *out_args, int *out_len);
} CudaCommand;

static bool cuda_cmd_autopoll(CUDAState *s,
                              const uint8_t *in_data, int in_len,
                              uint8_t *out_data, int *out_len)
{
    int autopoll;

    if (in_len != 1) {
        return false;
    }

    autopoll = (in_data[0] != 0);
    if (autopoll != s->autopoll) {
        s->autopoll = autopoll;
        if (autopoll) {
            timer_mod(s->adb_poll_timer,
                      qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                      (NANOSECONDS_PER_SECOND / (1000 / s->autopoll_rate_ms)));
        } else {
            timer_del(s->adb_poll_timer);
        }
    }
    return true;
}

static bool cuda_cmd_set_autorate(CUDAState *s,
                                  const uint8_t *in_data, int in_len,
                                  uint8_t *out_data, int *out_len)
{
    if (in_len != 1) {
        return false;
    }

    /* we don't want a period of 0 ms */
    /* FIXME: check what real hardware does */
    if (in_data[0] == 0) {
        return false;
    }

    s->autopoll_rate_ms = in_data[0];
    if (s->autopoll) {
        timer_mod(s->adb_poll_timer,
                  qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                  (NANOSECONDS_PER_SECOND / (1000 / s->autopoll_rate_ms)));
    }
    return true;
}

static bool cuda_cmd_set_device_list(CUDAState *s,
                                     const uint8_t *in_data, int in_len,
                                     uint8_t *out_data, int *out_len)
{
    if (in_len != 2) {
        return false;
    }

    s->adb_poll_mask = (((uint16_t)in_data[0]) << 8) | in_data[1];
    return true;
}

static bool cuda_cmd_powerdown(CUDAState *s,
                               const uint8_t *in_data, int in_len,
                               uint8_t *out_data, int *out_len)
{
    if (in_len != 0) {
        return false;
    }

    qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
    return true;
}

static bool cuda_cmd_reset_system(CUDAState *s,
                                  const uint8_t *in_data, int in_len,
                                  uint8_t *out_data, int *out_len)
{
    if (in_len != 0) {
        return false;
    }

    qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
    return true;
}

static bool cuda_cmd_set_file_server_flag(CUDAState *s,
                                          const uint8_t *in_data, int in_len,
                                          uint8_t *out_data, int *out_len)
{
    if (in_len != 1) {
        return false;
    }

    qemu_log_mask(LOG_UNIMP,
                  "CUDA: unimplemented command FILE_SERVER_FLAG %d\n",
                  in_data[0]);
    return true;
}

static bool cuda_cmd_set_power_message(CUDAState *s,
                                       const uint8_t *in_data, int in_len,
                                       uint8_t *out_data, int *out_len)
{
    if (in_len != 1) {
        return false;
    }

    qemu_log_mask(LOG_UNIMP,
                  "CUDA: unimplemented command SET_POWER_MESSAGE %d\n",
                  in_data[0]);
    return true;
}

static bool cuda_cmd_get_time(CUDAState *s,
                              const uint8_t *in_data, int in_len,
                              uint8_t *out_data, int *out_len)
{
    uint32_t ti;

    if (in_len != 0) {
        return false;
    }

    ti = s->tick_offset + (qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL)
                           / NANOSECONDS_PER_SECOND);
    out_data[0] = ti >> 24;
    out_data[1] = ti >> 16;
    out_data[2] = ti >> 8;
    out_data[3] = ti;
    *out_len = 4;
    return true;
}

static bool cuda_cmd_set_time(CUDAState *s,
                              const uint8_t *in_data, int in_len,
                              uint8_t *out_data, int *out_len)
{
    uint32_t ti;

    if (in_len != 4) {
        return false;
    }

    ti = (((uint32_t)in_data[0]) << 24) + (((uint32_t)in_data[1]) << 16)
         + (((uint32_t)in_data[2]) << 8) + in_data[3];
    s->tick_offset = ti - (qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL)
                           / NANOSECONDS_PER_SECOND);
    return true;
}

static const CudaCommand handlers[] = {
    { CUDA_AUTOPOLL, "AUTOPOLL", cuda_cmd_autopoll },
    { CUDA_SET_AUTO_RATE, "SET_AUTO_RATE",  cuda_cmd_set_autorate },
    { CUDA_SET_DEVICE_LIST, "SET_DEVICE_LIST", cuda_cmd_set_device_list },
    { CUDA_POWERDOWN, "POWERDOWN", cuda_cmd_powerdown },
    { CUDA_RESET_SYSTEM, "RESET_SYSTEM", cuda_cmd_reset_system },
    { CUDA_FILE_SERVER_FLAG, "FILE_SERVER_FLAG",
      cuda_cmd_set_file_server_flag },
    { CUDA_SET_POWER_MESSAGES, "SET_POWER_MESSAGES",
      cuda_cmd_set_power_message },
    { CUDA_GET_TIME, "GET_TIME", cuda_cmd_get_time },
    { CUDA_SET_TIME, "SET_TIME", cuda_cmd_set_time },
};

static void cuda_receive_packet(CUDAState *s,
                                const uint8_t *data, int len)
{
    uint8_t obuf[16] = { CUDA_PACKET, 0, data[0] };
    int i, out_len = 0;

    for (i = 0; i < ARRAY_SIZE(handlers); i++) {
        const CudaCommand *desc = &handlers[i];
        if (desc->command == data[0]) {
            CUDA_DPRINTF("handling command %s\n", desc->name);
            out_len = 0;
            if (desc->handler(s, data + 1, len - 1, obuf + 3, &out_len)) {
                cuda_send_packet_to_host(s, obuf, 3 + out_len);
            } else {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "CUDA: %s: wrong parameters %d\n",
                              desc->name, len);
                obuf[0] = ERROR_PACKET;
                obuf[1] = 0x5; /* bad parameters */
                obuf[2] = CUDA_PACKET;
                obuf[3] = data[0];
                cuda_send_packet_to_host(s, obuf, 4);
            }
            return;
        }
    }

    qemu_log_mask(LOG_GUEST_ERROR, "CUDA: unknown command 0x%02x\n", data[0]);
    obuf[0] = ERROR_PACKET;
    obuf[1] = 0x2; /* unknown command */
    obuf[2] = CUDA_PACKET;
    obuf[3] = data[0];
    cuda_send_packet_to_host(s, obuf, 4);
}

static void cuda_receive_packet_from_host(CUDAState *s,
                                          const uint8_t *data, int len)
{
#ifdef DEBUG_CUDA_PACKET
    {
        int i;
        printf("cuda_receive_packet_from_host:\n");
        for(i = 0; i < len; i++)
            printf(" %02x", data[i]);
        printf("\n");
    }
#endif
    switch(data[0]) {
    case ADB_PACKET:
        {
            uint8_t obuf[ADB_MAX_OUT_LEN + 3];
            int olen;
            olen = adb_request(&s->adb_bus, obuf + 2, data + 1, len - 1);
            if (olen > 0) {
                obuf[0] = ADB_PACKET;
                obuf[1] = 0x00;
                cuda_send_packet_to_host(s, obuf, olen + 2);
            } else {
                /* error */
                obuf[0] = ADB_PACKET;
                obuf[1] = -olen;
                obuf[2] = data[1];
                olen = 0;
                cuda_send_packet_to_host(s, obuf, olen + 3);
            }
        }
        break;
    case CUDA_PACKET:
        cuda_receive_packet(s, data + 1, len - 1);
        break;
    }
}

static const MemoryRegionOps cuda_ops = {
    .read = cuda_read,
    .write = cuda_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 1,
    },
};

static bool cuda_timer_exist(void *opaque, int version_id)
{
    CUDATimer *s = opaque;

    return s->timer != NULL;
}

static const VMStateDescription vmstate_cuda_timer = {
    .name = "cuda_timer",
    .version_id = 0,
    .minimum_version_id = 0,
    .fields = (VMStateField[]) {
        VMSTATE_UINT16(latch, CUDATimer),
        VMSTATE_UINT16(counter_value, CUDATimer),
        VMSTATE_INT64(load_time, CUDATimer),
        VMSTATE_INT64(next_irq_time, CUDATimer),
        VMSTATE_TIMER_PTR_TEST(timer, CUDATimer, cuda_timer_exist),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_cuda = {
    .name = "cuda",
    .version_id = 4,
    .minimum_version_id = 4,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(a, CUDAState),
        VMSTATE_UINT8(b, CUDAState),
        VMSTATE_UINT8(last_b, CUDAState),
        VMSTATE_UINT8(dira, CUDAState),
        VMSTATE_UINT8(dirb, CUDAState),
        VMSTATE_UINT8(sr, CUDAState),
        VMSTATE_UINT8(acr, CUDAState),
        VMSTATE_UINT8(last_acr, CUDAState),
        VMSTATE_UINT8(pcr, CUDAState),
        VMSTATE_UINT8(ifr, CUDAState),
        VMSTATE_UINT8(ier, CUDAState),
        VMSTATE_UINT8(anh, CUDAState),
        VMSTATE_INT32(data_in_size, CUDAState),
        VMSTATE_INT32(data_in_index, CUDAState),
        VMSTATE_INT32(data_out_index, CUDAState),
        VMSTATE_UINT8(autopoll, CUDAState),
        VMSTATE_UINT8(autopoll_rate_ms, CUDAState),
        VMSTATE_UINT16(adb_poll_mask, CUDAState),
        VMSTATE_BUFFER(data_in, CUDAState),
        VMSTATE_BUFFER(data_out, CUDAState),
        VMSTATE_UINT32(tick_offset, CUDAState),
        VMSTATE_STRUCT_ARRAY(timers, CUDAState, 2, 1,
                             vmstate_cuda_timer, CUDATimer),
        VMSTATE_TIMER_PTR(adb_poll_timer, CUDAState),
        VMSTATE_TIMER_PTR(sr_delay_timer, CUDAState),
        VMSTATE_END_OF_LIST()
    }
};

static void cuda_reset(DeviceState *dev)
{
    CUDAState *s = CUDA(dev);

    s->b = 0;
    s->a = 0;
    s->dirb = 0xff;
    s->dira = 0;
    s->sr = 0;
    s->acr = 0;
    s->pcr = 0;
    s->ifr = 0;
    s->ier = 0;
//    s->ier = T1_INT | SR_INT;
    s->anh = 0;
    s->data_in_size = 0;
    s->data_in_index = 0;
    s->data_out_index = 0;
    s->autopoll = 0;

    s->timers[0].latch = 0xffff;
    set_counter(s, &s->timers[0], 0xffff);

    s->timers[1].latch = 0xffff;

    s->sr_delay_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, cuda_set_sr_int, s);
}

static void cuda_realizefn(DeviceState *dev, Error **errp)
{
    CUDAState *s = CUDA(dev);
    struct tm tm;

    s->timers[0].timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, cuda_timer1, s);
    s->timers[0].frequency = s->tb_frequency;
    s->timers[1].timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, cuda_timer2, s);
    s->timers[1].frequency = (SCALE_US * 6000) / 4700;

    qemu_get_timedate(&tm, 0);
    s->tick_offset = (uint32_t)mktimegm(&tm) + RTC_OFFSET;

    s->adb_poll_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, cuda_adb_poll, s);
    s->autopoll_rate_ms = 20;
    s->adb_poll_mask = 0xffff;
}

static void cuda_initfn(Object *obj)
{
    SysBusDevice *d = SYS_BUS_DEVICE(obj);
    CUDAState *s = CUDA(obj);
    int i;

    memory_region_init_io(&s->mem, obj, &cuda_ops, s, "cuda", 0x2000);
    sysbus_init_mmio(d, &s->mem);
    sysbus_init_irq(d, &s->irq);

    for (i = 0; i < ARRAY_SIZE(s->timers); i++) {
        s->timers[i].index = i;
    }

    qbus_create_inplace(&s->adb_bus, sizeof(s->adb_bus), TYPE_ADB_BUS,
                        DEVICE(obj), "adb.0");
}

static Property cuda_properties[] = {
    DEFINE_PROP_UINT64("timebase-frequency", CUDAState, tb_frequency, 0),
    DEFINE_PROP_END_OF_LIST()
};

static void cuda_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);

    dc->realize = cuda_realizefn;
    dc->reset = cuda_reset;
    dc->vmsd = &vmstate_cuda;
    dc->props = cuda_properties;
    set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
}

static const TypeInfo cuda_type_info = {
    .name = TYPE_CUDA,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(CUDAState),
    .instance_init = cuda_initfn,
    .class_init = cuda_class_init,
};

static void cuda_register_types(void)
{
    type_register_static(&cuda_type_info);
}

type_init(cuda_register_types)