1 /* 2 * PowerMac descriptor-based DMA emulation 3 * 4 * Copyright (c) 2005-2007 Fabrice Bellard 5 * Copyright (c) 2007 Jocelyn Mayer 6 * Copyright (c) 2009 Laurent Vivier 7 * 8 * some parts from linux-2.6.28, arch/powerpc/include/asm/dbdma.h 9 * 10 * Definitions for using the Apple Descriptor-Based DMA controller 11 * in Power Macintosh computers. 12 * 13 * Copyright (C) 1996 Paul Mackerras. 14 * 15 * some parts from mol 0.9.71 16 * 17 * Descriptor based DMA emulation 18 * 19 * Copyright (C) 1998-2004 Samuel Rydh (samuel@ibrium.se) 20 * 21 * Permission is hereby granted, free of charge, to any person obtaining a copy 22 * of this software and associated documentation files (the "Software"), to deal 23 * in the Software without restriction, including without limitation the rights 24 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 25 * copies of the Software, and to permit persons to whom the Software is 26 * furnished to do so, subject to the following conditions: 27 * 28 * The above copyright notice and this permission notice shall be included in 29 * all copies or substantial portions of the Software. 30 * 31 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 32 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 33 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 34 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 35 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 36 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 37 * THE SOFTWARE. 38 */ 39 #include "qemu/osdep.h" 40 #include "hw/hw.h" 41 #include "hw/isa/isa.h" 42 #include "hw/ppc/mac_dbdma.h" 43 #include "qemu/main-loop.h" 44 45 /* debug DBDMA */ 46 //#define DEBUG_DBDMA 47 48 #ifdef DEBUG_DBDMA 49 #define DBDMA_DPRINTF(fmt, ...) \ 50 do { printf("DBDMA: " fmt , ## __VA_ARGS__); } while (0) 51 #else 52 #define DBDMA_DPRINTF(fmt, ...) 53 #endif 54 55 /* 56 */ 57 58 static DBDMAState *dbdma_from_ch(DBDMA_channel *ch) 59 { 60 return container_of(ch, DBDMAState, channels[ch->channel]); 61 } 62 63 #ifdef DEBUG_DBDMA 64 static void dump_dbdma_cmd(dbdma_cmd *cmd) 65 { 66 printf("dbdma_cmd %p\n", cmd); 67 printf(" req_count 0x%04x\n", le16_to_cpu(cmd->req_count)); 68 printf(" command 0x%04x\n", le16_to_cpu(cmd->command)); 69 printf(" phy_addr 0x%08x\n", le32_to_cpu(cmd->phy_addr)); 70 printf(" cmd_dep 0x%08x\n", le32_to_cpu(cmd->cmd_dep)); 71 printf(" res_count 0x%04x\n", le16_to_cpu(cmd->res_count)); 72 printf(" xfer_status 0x%04x\n", le16_to_cpu(cmd->xfer_status)); 73 } 74 #else 75 static void dump_dbdma_cmd(dbdma_cmd *cmd) 76 { 77 } 78 #endif 79 static void dbdma_cmdptr_load(DBDMA_channel *ch) 80 { 81 DBDMA_DPRINTF("dbdma_cmdptr_load 0x%08x\n", 82 ch->regs[DBDMA_CMDPTR_LO]); 83 cpu_physical_memory_read(ch->regs[DBDMA_CMDPTR_LO], 84 &ch->current, sizeof(dbdma_cmd)); 85 } 86 87 static void dbdma_cmdptr_save(DBDMA_channel *ch) 88 { 89 DBDMA_DPRINTF("dbdma_cmdptr_save 0x%08x\n", 90 ch->regs[DBDMA_CMDPTR_LO]); 91 DBDMA_DPRINTF("xfer_status 0x%08x res_count 0x%04x\n", 92 le16_to_cpu(ch->current.xfer_status), 93 le16_to_cpu(ch->current.res_count)); 94 cpu_physical_memory_write(ch->regs[DBDMA_CMDPTR_LO], 95 &ch->current, sizeof(dbdma_cmd)); 96 } 97 98 static void kill_channel(DBDMA_channel *ch) 99 { 100 DBDMA_DPRINTF("kill_channel\n"); 101 102 ch->regs[DBDMA_STATUS] |= DEAD; 103 ch->regs[DBDMA_STATUS] &= ~ACTIVE; 104 105 qemu_irq_raise(ch->irq); 106 } 107 108 static void conditional_interrupt(DBDMA_channel *ch) 109 { 110 dbdma_cmd *current = &ch->current; 111 uint16_t intr; 112 uint16_t sel_mask, sel_value; 113 uint32_t status; 114 int cond; 115 116 DBDMA_DPRINTF("%s\n", __func__); 117 118 intr = le16_to_cpu(current->command) & INTR_MASK; 119 120 switch(intr) { 121 case INTR_NEVER: /* don't interrupt */ 122 return; 123 case INTR_ALWAYS: /* always interrupt */ 124 qemu_irq_raise(ch->irq); 125 DBDMA_DPRINTF("%s: raise\n", __func__); 126 return; 127 } 128 129 status = ch->regs[DBDMA_STATUS] & DEVSTAT; 130 131 sel_mask = (ch->regs[DBDMA_INTR_SEL] >> 16) & 0x0f; 132 sel_value = ch->regs[DBDMA_INTR_SEL] & 0x0f; 133 134 cond = (status & sel_mask) == (sel_value & sel_mask); 135 136 switch(intr) { 137 case INTR_IFSET: /* intr if condition bit is 1 */ 138 if (cond) { 139 qemu_irq_raise(ch->irq); 140 DBDMA_DPRINTF("%s: raise\n", __func__); 141 } 142 return; 143 case INTR_IFCLR: /* intr if condition bit is 0 */ 144 if (!cond) { 145 qemu_irq_raise(ch->irq); 146 DBDMA_DPRINTF("%s: raise\n", __func__); 147 } 148 return; 149 } 150 } 151 152 static int conditional_wait(DBDMA_channel *ch) 153 { 154 dbdma_cmd *current = &ch->current; 155 uint16_t wait; 156 uint16_t sel_mask, sel_value; 157 uint32_t status; 158 int cond; 159 160 DBDMA_DPRINTF("conditional_wait\n"); 161 162 wait = le16_to_cpu(current->command) & WAIT_MASK; 163 164 switch(wait) { 165 case WAIT_NEVER: /* don't wait */ 166 return 0; 167 case WAIT_ALWAYS: /* always wait */ 168 return 1; 169 } 170 171 status = ch->regs[DBDMA_STATUS] & DEVSTAT; 172 173 sel_mask = (ch->regs[DBDMA_WAIT_SEL] >> 16) & 0x0f; 174 sel_value = ch->regs[DBDMA_WAIT_SEL] & 0x0f; 175 176 cond = (status & sel_mask) == (sel_value & sel_mask); 177 178 switch(wait) { 179 case WAIT_IFSET: /* wait if condition bit is 1 */ 180 if (cond) 181 return 1; 182 return 0; 183 case WAIT_IFCLR: /* wait if condition bit is 0 */ 184 if (!cond) 185 return 1; 186 return 0; 187 } 188 return 0; 189 } 190 191 static void next(DBDMA_channel *ch) 192 { 193 uint32_t cp; 194 195 ch->regs[DBDMA_STATUS] &= ~BT; 196 197 cp = ch->regs[DBDMA_CMDPTR_LO]; 198 ch->regs[DBDMA_CMDPTR_LO] = cp + sizeof(dbdma_cmd); 199 dbdma_cmdptr_load(ch); 200 } 201 202 static void branch(DBDMA_channel *ch) 203 { 204 dbdma_cmd *current = &ch->current; 205 206 ch->regs[DBDMA_CMDPTR_LO] = current->cmd_dep; 207 ch->regs[DBDMA_STATUS] |= BT; 208 dbdma_cmdptr_load(ch); 209 } 210 211 static void conditional_branch(DBDMA_channel *ch) 212 { 213 dbdma_cmd *current = &ch->current; 214 uint16_t br; 215 uint16_t sel_mask, sel_value; 216 uint32_t status; 217 int cond; 218 219 DBDMA_DPRINTF("conditional_branch\n"); 220 221 /* check if we must branch */ 222 223 br = le16_to_cpu(current->command) & BR_MASK; 224 225 switch(br) { 226 case BR_NEVER: /* don't branch */ 227 next(ch); 228 return; 229 case BR_ALWAYS: /* always branch */ 230 branch(ch); 231 return; 232 } 233 234 status = ch->regs[DBDMA_STATUS] & DEVSTAT; 235 236 sel_mask = (ch->regs[DBDMA_BRANCH_SEL] >> 16) & 0x0f; 237 sel_value = ch->regs[DBDMA_BRANCH_SEL] & 0x0f; 238 239 cond = (status & sel_mask) == (sel_value & sel_mask); 240 241 switch(br) { 242 case BR_IFSET: /* branch if condition bit is 1 */ 243 if (cond) 244 branch(ch); 245 else 246 next(ch); 247 return; 248 case BR_IFCLR: /* branch if condition bit is 0 */ 249 if (!cond) 250 branch(ch); 251 else 252 next(ch); 253 return; 254 } 255 } 256 257 static void channel_run(DBDMA_channel *ch); 258 259 static void dbdma_end(DBDMA_io *io) 260 { 261 DBDMA_channel *ch = io->channel; 262 dbdma_cmd *current = &ch->current; 263 264 DBDMA_DPRINTF("%s\n", __func__); 265 266 if (conditional_wait(ch)) 267 goto wait; 268 269 current->xfer_status = cpu_to_le16(ch->regs[DBDMA_STATUS]); 270 current->res_count = cpu_to_le16(io->len); 271 dbdma_cmdptr_save(ch); 272 if (io->is_last) 273 ch->regs[DBDMA_STATUS] &= ~FLUSH; 274 275 conditional_interrupt(ch); 276 conditional_branch(ch); 277 278 wait: 279 /* Indicate that we're ready for a new DMA round */ 280 ch->io.processing = false; 281 282 if ((ch->regs[DBDMA_STATUS] & RUN) && 283 (ch->regs[DBDMA_STATUS] & ACTIVE)) 284 channel_run(ch); 285 } 286 287 static void start_output(DBDMA_channel *ch, int key, uint32_t addr, 288 uint16_t req_count, int is_last) 289 { 290 DBDMA_DPRINTF("start_output\n"); 291 292 /* KEY_REGS, KEY_DEVICE and KEY_STREAM 293 * are not implemented in the mac-io chip 294 */ 295 296 DBDMA_DPRINTF("addr 0x%x key 0x%x\n", addr, key); 297 if (!addr || key > KEY_STREAM3) { 298 kill_channel(ch); 299 return; 300 } 301 302 ch->io.addr = addr; 303 ch->io.len = req_count; 304 ch->io.is_last = is_last; 305 ch->io.dma_end = dbdma_end; 306 ch->io.is_dma_out = 1; 307 ch->io.processing = true; 308 if (ch->rw) { 309 ch->rw(&ch->io); 310 } 311 } 312 313 static void start_input(DBDMA_channel *ch, int key, uint32_t addr, 314 uint16_t req_count, int is_last) 315 { 316 DBDMA_DPRINTF("start_input\n"); 317 318 /* KEY_REGS, KEY_DEVICE and KEY_STREAM 319 * are not implemented in the mac-io chip 320 */ 321 322 DBDMA_DPRINTF("addr 0x%x key 0x%x\n", addr, key); 323 if (!addr || key > KEY_STREAM3) { 324 kill_channel(ch); 325 return; 326 } 327 328 ch->io.addr = addr; 329 ch->io.len = req_count; 330 ch->io.is_last = is_last; 331 ch->io.dma_end = dbdma_end; 332 ch->io.is_dma_out = 0; 333 ch->io.processing = true; 334 if (ch->rw) { 335 ch->rw(&ch->io); 336 } 337 } 338 339 static void load_word(DBDMA_channel *ch, int key, uint32_t addr, 340 uint16_t len) 341 { 342 dbdma_cmd *current = &ch->current; 343 uint32_t val; 344 345 DBDMA_DPRINTF("load_word\n"); 346 347 /* only implements KEY_SYSTEM */ 348 349 if (key != KEY_SYSTEM) { 350 printf("DBDMA: LOAD_WORD, unimplemented key %x\n", key); 351 kill_channel(ch); 352 return; 353 } 354 355 cpu_physical_memory_read(addr, &val, len); 356 357 if (len == 2) 358 val = (val << 16) | (current->cmd_dep & 0x0000ffff); 359 else if (len == 1) 360 val = (val << 24) | (current->cmd_dep & 0x00ffffff); 361 362 current->cmd_dep = val; 363 364 if (conditional_wait(ch)) 365 goto wait; 366 367 current->xfer_status = cpu_to_le16(ch->regs[DBDMA_STATUS]); 368 dbdma_cmdptr_save(ch); 369 ch->regs[DBDMA_STATUS] &= ~FLUSH; 370 371 conditional_interrupt(ch); 372 next(ch); 373 374 wait: 375 DBDMA_kick(dbdma_from_ch(ch)); 376 } 377 378 static void store_word(DBDMA_channel *ch, int key, uint32_t addr, 379 uint16_t len) 380 { 381 dbdma_cmd *current = &ch->current; 382 uint32_t val; 383 384 DBDMA_DPRINTF("store_word\n"); 385 386 /* only implements KEY_SYSTEM */ 387 388 if (key != KEY_SYSTEM) { 389 printf("DBDMA: STORE_WORD, unimplemented key %x\n", key); 390 kill_channel(ch); 391 return; 392 } 393 394 val = current->cmd_dep; 395 if (len == 2) 396 val >>= 16; 397 else if (len == 1) 398 val >>= 24; 399 400 cpu_physical_memory_write(addr, &val, len); 401 402 if (conditional_wait(ch)) 403 goto wait; 404 405 current->xfer_status = cpu_to_le16(ch->regs[DBDMA_STATUS]); 406 dbdma_cmdptr_save(ch); 407 ch->regs[DBDMA_STATUS] &= ~FLUSH; 408 409 conditional_interrupt(ch); 410 next(ch); 411 412 wait: 413 DBDMA_kick(dbdma_from_ch(ch)); 414 } 415 416 static void nop(DBDMA_channel *ch) 417 { 418 dbdma_cmd *current = &ch->current; 419 420 if (conditional_wait(ch)) 421 goto wait; 422 423 current->xfer_status = cpu_to_le16(ch->regs[DBDMA_STATUS]); 424 dbdma_cmdptr_save(ch); 425 426 conditional_interrupt(ch); 427 conditional_branch(ch); 428 429 wait: 430 DBDMA_kick(dbdma_from_ch(ch)); 431 } 432 433 static void stop(DBDMA_channel *ch) 434 { 435 ch->regs[DBDMA_STATUS] &= ~(ACTIVE|DEAD|FLUSH); 436 437 /* the stop command does not increment command pointer */ 438 } 439 440 static void channel_run(DBDMA_channel *ch) 441 { 442 dbdma_cmd *current = &ch->current; 443 uint16_t cmd, key; 444 uint16_t req_count; 445 uint32_t phy_addr; 446 447 DBDMA_DPRINTF("channel_run\n"); 448 dump_dbdma_cmd(current); 449 450 /* clear WAKE flag at command fetch */ 451 452 ch->regs[DBDMA_STATUS] &= ~WAKE; 453 454 cmd = le16_to_cpu(current->command) & COMMAND_MASK; 455 456 switch (cmd) { 457 case DBDMA_NOP: 458 nop(ch); 459 return; 460 461 case DBDMA_STOP: 462 stop(ch); 463 return; 464 } 465 466 key = le16_to_cpu(current->command) & 0x0700; 467 req_count = le16_to_cpu(current->req_count); 468 phy_addr = le32_to_cpu(current->phy_addr); 469 470 if (key == KEY_STREAM4) { 471 printf("command %x, invalid key 4\n", cmd); 472 kill_channel(ch); 473 return; 474 } 475 476 switch (cmd) { 477 case OUTPUT_MORE: 478 start_output(ch, key, phy_addr, req_count, 0); 479 return; 480 481 case OUTPUT_LAST: 482 start_output(ch, key, phy_addr, req_count, 1); 483 return; 484 485 case INPUT_MORE: 486 start_input(ch, key, phy_addr, req_count, 0); 487 return; 488 489 case INPUT_LAST: 490 start_input(ch, key, phy_addr, req_count, 1); 491 return; 492 } 493 494 if (key < KEY_REGS) { 495 printf("command %x, invalid key %x\n", cmd, key); 496 key = KEY_SYSTEM; 497 } 498 499 /* for LOAD_WORD and STORE_WORD, req_count is on 3 bits 500 * and BRANCH is invalid 501 */ 502 503 req_count = req_count & 0x0007; 504 if (req_count & 0x4) { 505 req_count = 4; 506 phy_addr &= ~3; 507 } else if (req_count & 0x2) { 508 req_count = 2; 509 phy_addr &= ~1; 510 } else 511 req_count = 1; 512 513 switch (cmd) { 514 case LOAD_WORD: 515 load_word(ch, key, phy_addr, req_count); 516 return; 517 518 case STORE_WORD: 519 store_word(ch, key, phy_addr, req_count); 520 return; 521 } 522 } 523 524 static void DBDMA_run(DBDMAState *s) 525 { 526 int channel; 527 528 for (channel = 0; channel < DBDMA_CHANNELS; channel++) { 529 DBDMA_channel *ch = &s->channels[channel]; 530 uint32_t status = ch->regs[DBDMA_STATUS]; 531 if (!ch->io.processing && (status & RUN) && (status & ACTIVE)) { 532 channel_run(ch); 533 } 534 } 535 } 536 537 static void DBDMA_run_bh(void *opaque) 538 { 539 DBDMAState *s = opaque; 540 541 DBDMA_DPRINTF("DBDMA_run_bh\n"); 542 543 DBDMA_run(s); 544 } 545 546 void DBDMA_kick(DBDMAState *dbdma) 547 { 548 qemu_bh_schedule(dbdma->bh); 549 } 550 551 void DBDMA_register_channel(void *dbdma, int nchan, qemu_irq irq, 552 DBDMA_rw rw, DBDMA_flush flush, 553 void *opaque) 554 { 555 DBDMAState *s = dbdma; 556 DBDMA_channel *ch = &s->channels[nchan]; 557 558 DBDMA_DPRINTF("DBDMA_register_channel 0x%x\n", nchan); 559 560 assert(rw); 561 assert(flush); 562 563 ch->irq = irq; 564 ch->rw = rw; 565 ch->flush = flush; 566 ch->io.opaque = opaque; 567 } 568 569 static void 570 dbdma_control_write(DBDMA_channel *ch) 571 { 572 uint16_t mask, value; 573 uint32_t status; 574 575 mask = (ch->regs[DBDMA_CONTROL] >> 16) & 0xffff; 576 value = ch->regs[DBDMA_CONTROL] & 0xffff; 577 578 value &= (RUN | PAUSE | FLUSH | WAKE | DEVSTAT); 579 580 status = ch->regs[DBDMA_STATUS]; 581 582 status = (value & mask) | (status & ~mask); 583 584 if (status & WAKE) 585 status |= ACTIVE; 586 if (status & RUN) { 587 status |= ACTIVE; 588 status &= ~DEAD; 589 } 590 if (status & PAUSE) 591 status &= ~ACTIVE; 592 if ((ch->regs[DBDMA_STATUS] & RUN) && !(status & RUN)) { 593 /* RUN is cleared */ 594 status &= ~(ACTIVE|DEAD); 595 } 596 597 if ((status & FLUSH) && ch->flush) { 598 ch->flush(&ch->io); 599 status &= ~FLUSH; 600 } 601 602 DBDMA_DPRINTF(" status 0x%08x\n", status); 603 604 ch->regs[DBDMA_STATUS] = status; 605 606 if (status & ACTIVE) { 607 DBDMA_kick(dbdma_from_ch(ch)); 608 } 609 } 610 611 static void dbdma_write(void *opaque, hwaddr addr, 612 uint64_t value, unsigned size) 613 { 614 int channel = addr >> DBDMA_CHANNEL_SHIFT; 615 DBDMAState *s = opaque; 616 DBDMA_channel *ch = &s->channels[channel]; 617 int reg = (addr - (channel << DBDMA_CHANNEL_SHIFT)) >> 2; 618 619 DBDMA_DPRINTF("writel 0x" TARGET_FMT_plx " <= 0x%08"PRIx64"\n", 620 addr, value); 621 DBDMA_DPRINTF("channel 0x%x reg 0x%x\n", 622 (uint32_t)addr >> DBDMA_CHANNEL_SHIFT, reg); 623 624 /* cmdptr cannot be modified if channel is ACTIVE */ 625 626 if (reg == DBDMA_CMDPTR_LO && (ch->regs[DBDMA_STATUS] & ACTIVE)) { 627 return; 628 } 629 630 ch->regs[reg] = value; 631 632 switch(reg) { 633 case DBDMA_CONTROL: 634 dbdma_control_write(ch); 635 break; 636 case DBDMA_CMDPTR_LO: 637 /* 16-byte aligned */ 638 ch->regs[DBDMA_CMDPTR_LO] &= ~0xf; 639 dbdma_cmdptr_load(ch); 640 break; 641 case DBDMA_STATUS: 642 case DBDMA_INTR_SEL: 643 case DBDMA_BRANCH_SEL: 644 case DBDMA_WAIT_SEL: 645 /* nothing to do */ 646 break; 647 case DBDMA_XFER_MODE: 648 case DBDMA_CMDPTR_HI: 649 case DBDMA_DATA2PTR_HI: 650 case DBDMA_DATA2PTR_LO: 651 case DBDMA_ADDRESS_HI: 652 case DBDMA_BRANCH_ADDR_HI: 653 case DBDMA_RES1: 654 case DBDMA_RES2: 655 case DBDMA_RES3: 656 case DBDMA_RES4: 657 /* unused */ 658 break; 659 } 660 } 661 662 static uint64_t dbdma_read(void *opaque, hwaddr addr, 663 unsigned size) 664 { 665 uint32_t value; 666 int channel = addr >> DBDMA_CHANNEL_SHIFT; 667 DBDMAState *s = opaque; 668 DBDMA_channel *ch = &s->channels[channel]; 669 int reg = (addr - (channel << DBDMA_CHANNEL_SHIFT)) >> 2; 670 671 value = ch->regs[reg]; 672 673 DBDMA_DPRINTF("readl 0x" TARGET_FMT_plx " => 0x%08x\n", addr, value); 674 DBDMA_DPRINTF("channel 0x%x reg 0x%x\n", 675 (uint32_t)addr >> DBDMA_CHANNEL_SHIFT, reg); 676 677 switch(reg) { 678 case DBDMA_CONTROL: 679 value = 0; 680 break; 681 case DBDMA_STATUS: 682 case DBDMA_CMDPTR_LO: 683 case DBDMA_INTR_SEL: 684 case DBDMA_BRANCH_SEL: 685 case DBDMA_WAIT_SEL: 686 /* nothing to do */ 687 break; 688 case DBDMA_XFER_MODE: 689 case DBDMA_CMDPTR_HI: 690 case DBDMA_DATA2PTR_HI: 691 case DBDMA_DATA2PTR_LO: 692 case DBDMA_ADDRESS_HI: 693 case DBDMA_BRANCH_ADDR_HI: 694 /* unused */ 695 value = 0; 696 break; 697 case DBDMA_RES1: 698 case DBDMA_RES2: 699 case DBDMA_RES3: 700 case DBDMA_RES4: 701 /* reserved */ 702 break; 703 } 704 705 return value; 706 } 707 708 static const MemoryRegionOps dbdma_ops = { 709 .read = dbdma_read, 710 .write = dbdma_write, 711 .endianness = DEVICE_LITTLE_ENDIAN, 712 .valid = { 713 .min_access_size = 4, 714 .max_access_size = 4, 715 }, 716 }; 717 718 static const VMStateDescription vmstate_dbdma_io = { 719 .name = "dbdma_io", 720 .version_id = 0, 721 .minimum_version_id = 0, 722 .fields = (VMStateField[]) { 723 VMSTATE_UINT64(addr, struct DBDMA_io), 724 VMSTATE_INT32(len, struct DBDMA_io), 725 VMSTATE_INT32(is_last, struct DBDMA_io), 726 VMSTATE_INT32(is_dma_out, struct DBDMA_io), 727 VMSTATE_BOOL(processing, struct DBDMA_io), 728 VMSTATE_END_OF_LIST() 729 } 730 }; 731 732 static const VMStateDescription vmstate_dbdma_cmd = { 733 .name = "dbdma_cmd", 734 .version_id = 0, 735 .minimum_version_id = 0, 736 .fields = (VMStateField[]) { 737 VMSTATE_UINT16(req_count, dbdma_cmd), 738 VMSTATE_UINT16(command, dbdma_cmd), 739 VMSTATE_UINT32(phy_addr, dbdma_cmd), 740 VMSTATE_UINT32(cmd_dep, dbdma_cmd), 741 VMSTATE_UINT16(res_count, dbdma_cmd), 742 VMSTATE_UINT16(xfer_status, dbdma_cmd), 743 VMSTATE_END_OF_LIST() 744 } 745 }; 746 747 static const VMStateDescription vmstate_dbdma_channel = { 748 .name = "dbdma_channel", 749 .version_id = 1, 750 .minimum_version_id = 1, 751 .fields = (VMStateField[]) { 752 VMSTATE_UINT32_ARRAY(regs, struct DBDMA_channel, DBDMA_REGS), 753 VMSTATE_STRUCT(io, struct DBDMA_channel, 0, vmstate_dbdma_io, DBDMA_io), 754 VMSTATE_STRUCT(current, struct DBDMA_channel, 0, vmstate_dbdma_cmd, 755 dbdma_cmd), 756 VMSTATE_END_OF_LIST() 757 } 758 }; 759 760 static const VMStateDescription vmstate_dbdma = { 761 .name = "dbdma", 762 .version_id = 3, 763 .minimum_version_id = 3, 764 .fields = (VMStateField[]) { 765 VMSTATE_STRUCT_ARRAY(channels, DBDMAState, DBDMA_CHANNELS, 1, 766 vmstate_dbdma_channel, DBDMA_channel), 767 VMSTATE_END_OF_LIST() 768 } 769 }; 770 771 static void dbdma_reset(void *opaque) 772 { 773 DBDMAState *s = opaque; 774 int i; 775 776 for (i = 0; i < DBDMA_CHANNELS; i++) 777 memset(s->channels[i].regs, 0, DBDMA_SIZE); 778 } 779 780 static void dbdma_unassigned_rw(DBDMA_io *io) 781 { 782 DBDMA_channel *ch = io->channel; 783 qemu_log_mask(LOG_GUEST_ERROR, "%s: use of unassigned channel %d\n", 784 __func__, ch->channel); 785 } 786 787 static void dbdma_unassigned_flush(DBDMA_io *io) 788 { 789 DBDMA_channel *ch = io->channel; 790 qemu_log_mask(LOG_GUEST_ERROR, "%s: use of unassigned channel %d\n", 791 __func__, ch->channel); 792 } 793 794 void* DBDMA_init (MemoryRegion **dbdma_mem) 795 { 796 DBDMAState *s; 797 int i; 798 799 s = g_malloc0(sizeof(DBDMAState)); 800 801 for (i = 0; i < DBDMA_CHANNELS; i++) { 802 DBDMA_io *io = &s->channels[i].io; 803 DBDMA_channel *ch = &s->channels[i]; 804 qemu_iovec_init(&io->iov, 1); 805 806 ch->rw = dbdma_unassigned_rw; 807 ch->flush = dbdma_unassigned_flush; 808 ch->channel = i; 809 ch->io.channel = ch; 810 } 811 812 memory_region_init_io(&s->mem, NULL, &dbdma_ops, s, "dbdma", 0x1000); 813 *dbdma_mem = &s->mem; 814 vmstate_register(NULL, -1, &vmstate_dbdma, s); 815 qemu_register_reset(dbdma_reset, s); 816 817 s->bh = qemu_bh_new(DBDMA_run_bh, s); 818 819 return s; 820 } 821